hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
70de18d38202ddb9d069dc73617adc34eef26526.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
__global__ void reverse(int * a, int* b){
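// note: the kernel body is empty, so no reversal is actually performed on the device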
}
void print_array(int* array, int size){
int i = 0;
for(; i < size; i++){
printf("%d ",array[i]);
}
}
#define N 512
int main(void){
int size = 5*sizeof(int);
int d_in[5] = {100, 110, 200, 220, 300};
int *dev_in, *dev_out;
int d_out[5];
printf("Prima dll'inversione");
print_arrray(d_in, 5);
//allocate device copy of d_in
hipMalloc( (void**)&dev_in, size );
hipMalloc( (void**)&dev_out, size);
//copy input to device
hipMemcpy(dev_in, &d_in, size, hipMemcpyHostToDevice );
//launch reverse kernel
hipLaunchKernelGGL(( reverse), dim3(1), dim3(1), 0, 0, dev_in, dev_out);
//copy device result into host memory
hipMemcpy(d_out, dev_out, size, hipMemcpyDeviceToHost);
hipFree(dev_in);
hipFree(dev_out);
printf("Dopo l'inversione");
print_array(d_out);
return 0;
}
| 70de18d38202ddb9d069dc73617adc34eef26526.cu | #include <stdio.h>
#include <math.h>
__global__ void reverse(int * a, int* b){
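// note: the kernel body is empty, so no reversal is actually performed on the device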
}
void print_array(int* array, int size){
int i = 0;
for(; i < size; i++){
printf("%d ",array[i]);
}
}
#define N 512
int main(void){
int size = 5*sizeof(int);
int d_in[5] = {100, 110, 200, 220, 300};
int *dev_in, *dev_out;
int d_out[5];
printf("Prima dll'inversione");
print_arrray(d_in, 5);
//allocate device copy of d_in
cudaMalloc( (void**)&dev_in, size );
cudaMalloc( (void**)&dev_out, size);
//copy input to device
cudaMemcpy(dev_in, &d_in, size, cudaMemcpyHostToDevice );
//launch reverse kernel
reverse<<<1, 1>>>(dev_in, dev_out);
//copy device result into host memory
cudaMemcpy(d_out, dev_out, size, cudaMemcpyDeviceToHost);
cudaFree(dev_in);
cudaFree(dev_out);
printf("Dopo l'inversione");
print_array(d_out);
return 0;
}
|
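The main structural change in the pair above is the kernel launch: hipify rewrites CUDA's `reverse<<<grid, block>>>(...)` syntax into a `hipLaunchKernelGGL` call with explicit grid, block, shared-memory, and stream arguments. Below is a minimal, self-contained sketch of that launch form; it is not part of the dataset, and the `scale` kernel and all sizes are illustrative.

```cpp
#include "hip/hip_runtime.h"
#include <stdio.h>

// Trivial kernel: multiply each element by a factor (one thread per element).
__global__ void scale(int *data, int factor) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    data[i] *= factor;
}

int main(void) {
    const int n = 8;
    int h[n] = {1, 2, 3, 4, 5, 6, 7, 8};
    int *d;
    hipMalloc((void**)&d, n * sizeof(int));
    hipMemcpy(d, h, n * sizeof(int), hipMemcpyHostToDevice);
    // CUDA form:  scale<<<1, n>>>(d, 10);
    // HIP form produced by hipify (kernel, grid, block, shared-mem bytes, stream, args...):
    hipLaunchKernelGGL(scale, dim3(1), dim3(n), 0, 0, d, 10);
    hipMemcpy(h, d, n * sizeof(int), hipMemcpyDeviceToHost);
    for (int i = 0; i < n; i++) printf("%d ", h[i]);
    printf("\n");
    hipFree(d);
    return 0;
}
```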
fd54c71d7d12d7f6ffc18ed752f94c70035de5e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
template <typename U, typename V>
constexpr __host__ __device__ auto divUp(U a, V b) -> decltype(a + b) {
return (a + b - 1) / b;
}
template <int FS, int SB, int padding_l, typename scalar_t>
__inline__ __device__ void zeroSharedMem(scalar_t* data) {
/*
Given an array of length FS + SB, zero out the first padding_l and last
(FS - padding_l) values in the array
*/
int tid = threadIdx.x;
if (FS < SB) {
// zero all if we have enough threads in a block to do all of them
if (tid < padding_l || tid > SB - FS + padding_l - 1) {
data[tid] = scalar_t(0.0);
}
} else {
// otherwise zero out one block at a time
const int numIterations = divUp<int, int>(FS, SB);
for (int i = 0; i < numIterations; i++) {
int offset = i * SB;
if (tid + offset < padding_l) {
data[tid + offset] = scalar_t(0.0);
} else if (tid + offset < FS) {
data[SB + tid + offset] = scalar_t(0.0);
}
}
}
}
template <typename scalar_t>
__inline__ __device__ scalar_t warpReduce(scalar_t data) {
/*
Reduce a value across each warp. After processing, every lane in the
warp will contain the sum of all original values in that warp.
data - value to reduce
*/
data += __shfl_xor_sync(SHFL_MASK, data, 16);
data += __shfl_xor_sync(SHFL_MASK, data, 8);
data += __shfl_xor_sync(SHFL_MASK, data, 4);
data += __shfl_xor_sync(SHFL_MASK, data, 2);
data += __shfl_xor_sync(SHFL_MASK, data, 1);
return data;
}
template <typename scalar_t>
__inline__ __device__ scalar_t blockReduce(scalar_t data) {
/*
Reduce a value across the block. After processing, threads in the
first warp return the block-wide sum.
data - value to reduce
*/
static __shared__ scalar_t warpSum[32];
const int tid = threadIdx.x;
int wid = tid / 32;
int lane = tid % 32;
__syncthreads();
// reduce each warp then write to shared memory
scalar_t sum = warpReduce(data);
if (lane == 0) {
warpSum[wid] = sum;
}
__syncthreads();
scalar_t v;
// perform final sum of partial warp sums
if (tid < blockDim.x / 32) {
v = warpSum[lane];
} else {
v = scalar_t(0.0);
}
if (wid == 0) {
v = warpReduce(v);
}
__syncthreads();
return v;
}
void checkCudaStatus(hipError_t status, int lineNumber = -1) {
if (status != hipSuccess) {
std::cout << hipGetErrorString(status) << " at line " << lineNumber
<< std::endl;
std::cout << "Exiting" << std::endl;
exit(1);
}
}
template <int FS, int SB, int padding_l, typename scalar_t>
__device__ void load_input_to_shared(
const scalar_t* input, // global memory
int inputOffset,
int sequenceLength,
int iteration,
int numIterations,
bool no_prev,
scalar_t* output /* shared memory */) {
/*
Load a block size of input into shared memory with
right and left overhang of total size FS. If previously
loaded memory, overlap will be shifted over to reduce
global memory access
input - pointer to start of channel sequence
inputOffset - how far in the sequence to start loading
sequenceLength - total length of sequence
iteration - which block of sequence we are loading
numIterations - total number of blocks to load
no_prev - whether to load the whole block if the previous block
wasn't loaded
output - shared memory to write input to
*/
const int tid = threadIdx.x;
// Load the left "overhang" of input
if (iteration > 0) {
if (padding_l < SB) {
// load all at once
if (tid < padding_l) {
output[tid] =
(no_prev) ? input[inputOffset - padding_l + tid] : output[tid + SB];
}
} else {
// load in chunks of size SB
int numIterations = divUp<int, int>(padding_l, SB);
for (int i = 0; i < numIterations; i++) {
int offset = i * SB;
if ((tid + offset) < padding_l) {
output[tid + offset] = (no_prev)
? input[inputOffset - padding_l + tid + offset]
: output[tid + offset + SB];
}
}
}
}
// Load the right "overhang" of input
if (iteration < (numIterations - 1)) {
const int elementsLeft = sequenceLength - (iteration + 1) * SB;
if ((FS - padding_l) < SB) {
// load all at once
if (tid < (FS - padding_l)) {
output[padding_l + SB + tid] = (tid < elementsLeft)
? input[inputOffset + SB + tid]
: scalar_t(0.0);
}
} else {
// load in chunks of size SB
int numIterations = divUp<int, int>(FS - padding_l, SB);
for (int i = 0; i < numIterations; i++) {
int offset = i * SB;
if ((tid + offset) < (FS - padding_l)) {
output[padding_l + SB + tid + offset] =
((tid + offset) < elementsLeft)
? input[inputOffset + SB + tid + offset]
: scalar_t(0.0);
}
}
}
}
// We should also clear out the right "overhang"
if (iteration == (numIterations - 1)) {
if ((FS - padding_l) < SB) {
// clear out all at once
if (tid < (FS - padding_l)) {
output[padding_l + SB + tid] = scalar_t(0.0);
}
} else {
// clear in chunks of size SB
int numIterations = divUp<int, int>(FS - padding_l, SB);
for (int i = 0; i < numIterations; i++) {
int offset = i * SB;
if ((tid + offset) < (FS - padding_l)) {
output[padding_l + SB + tid + offset] = scalar_t(0.0);
}
}
}
}
output[tid + padding_l] = ((inputOffset + tid) < sequenceLength)
? input[inputOffset + tid]
: scalar_t(0.0);
}
| fd54c71d7d12d7f6ffc18ed752f94c70035de5e4.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
template <typename U, typename V>
constexpr __host__ __device__ auto divUp(U a, V b) -> decltype(a + b) {
return (a + b - 1) / b;
}
template <int FS, int SB, int padding_l, typename scalar_t>
__inline__ __device__ void zeroSharedMem(scalar_t* data) {
/*
Given an array of length FS + SB, zero out the first padding_l and last
(FS - padding_l) values in the array
*/
int tid = threadIdx.x;
if (FS < SB) {
// zero all if we have enough threads in a block to do all of them
if (tid < padding_l || tid > SB - FS + padding_l - 1) {
data[tid] = scalar_t(0.0);
}
} else {
// otherwise zero out one block at a time
const int numIterations = divUp<int, int>(FS, SB);
for (int i = 0; i < numIterations; i++) {
int offset = i * SB;
if (tid + offset < padding_l) {
data[tid + offset] = scalar_t(0.0);
} else if (tid + offset < FS) {
data[SB + tid + offset] = scalar_t(0.0);
}
}
}
}
template <typename scalar_t>
__inline__ __device__ scalar_t warpReduce(scalar_t data) {
/*
Reduce a value across each warp. After processing, every lane in the
warp will contain the sum of all original values in that warp.
data - value to reduce
*/
data += __shfl_xor_sync(SHFL_MASK, data, 16);
data += __shfl_xor_sync(SHFL_MASK, data, 8);
data += __shfl_xor_sync(SHFL_MASK, data, 4);
data += __shfl_xor_sync(SHFL_MASK, data, 2);
data += __shfl_xor_sync(SHFL_MASK, data, 1);
return data;
}
template <typename scalar_t>
__inline__ __device__ scalar_t blockReduce(scalar_t data) {
/*
Reduce a value across the block. After processing, threads in the
first warp return the block-wide sum.
data - value to reduce
*/
static __shared__ scalar_t warpSum[32];
const int tid = threadIdx.x;
int wid = tid / 32;
int lane = tid % 32;
__syncthreads();
// reduce each warp then write to shared memory
scalar_t sum = warpReduce(data);
if (lane == 0) {
warpSum[wid] = sum;
}
__syncthreads();
scalar_t v;
// perform final sum of partial warp sums
if (tid < blockDim.x / 32) {
v = warpSum[lane];
} else {
v = scalar_t(0.0);
}
if (wid == 0) {
v = warpReduce(v);
}
__syncthreads();
return v;
}
void checkCudaStatus(cudaError_t status, int lineNumber = -1) {
if (status != cudaSuccess) {
std::cout << cudaGetErrorString(status) << " at line " << lineNumber
<< std::endl;
std::cout << "Exiting" << std::endl;
exit(1);
}
}
template <int FS, int SB, int padding_l, typename scalar_t>
__device__ void load_input_to_shared(
const scalar_t* input, // global memory
int inputOffset,
int sequenceLength,
int iteration,
int numIterations,
bool no_prev,
scalar_t* output /* shared memory */) {
/*
Load a block size of input into shared memory with
right and left overhang of total size FS. If previously
loaded memory, overlap will be shifted over to reduce
global memory access
input - pointer to start of channel sequence
inputOffset - how far in the sequence to start loading
sequenceLength - total length of sequence
iteration - which block of sequence we are loading
numIterations - total number of blocks to load
no_prev - whether to load the whole block if the previous block
wasn't loaded
output - shared memory to write input to
*/
const int tid = threadIdx.x;
// Load the left "overhang" of input
if (iteration > 0) {
if (padding_l < SB) {
// load all at once
if (tid < padding_l) {
output[tid] =
(no_prev) ? input[inputOffset - padding_l + tid] : output[tid + SB];
}
} else {
// load in chunks of size SB
int numIterations = divUp<int, int>(padding_l, SB);
for (int i = 0; i < numIterations; i++) {
int offset = i * SB;
if ((tid + offset) < padding_l) {
output[tid + offset] = (no_prev)
? input[inputOffset - padding_l + tid + offset]
: output[tid + offset + SB];
}
}
}
}
// Load the right "overhang" of input
if (iteration < (numIterations - 1)) {
const int elementsLeft = sequenceLength - (iteration + 1) * SB;
if ((FS - padding_l) < SB) {
// load all at once
if (tid < (FS - padding_l)) {
output[padding_l + SB + tid] = (tid < elementsLeft)
? input[inputOffset + SB + tid]
: scalar_t(0.0);
}
} else {
// load in chunks of size SB
int numIterations = divUp<int, int>(FS - padding_l, SB);
for (int i = 0; i < numIterations; i++) {
int offset = i * SB;
if ((tid + offset) < (FS - padding_l)) {
output[padding_l + SB + tid + offset] =
((tid + offset) < elementsLeft)
? input[inputOffset + SB + tid + offset]
: scalar_t(0.0);
}
}
}
}
// We should also clear out the right "overhang"
if (iteration == (numIterations - 1)) {
if ((FS - padding_l) < SB) {
// clear out all at once
if (tid < (FS - padding_l)) {
output[padding_l + SB + tid] = scalar_t(0.0);
}
} else {
// clear in chunks of size SB
int numIterations = divUp<int, int>(FS - padding_l, SB);
for (int i = 0; i < numIterations; i++) {
int offset = i * SB;
if ((tid + offset) < (FS - padding_l)) {
output[padding_l + SB + tid + offset] = scalar_t(0.0);
}
}
}
}
output[tid + padding_l] = ((inputOffset + tid) < sequenceLength)
? input[inputOffset + tid]
: scalar_t(0.0);
}
|
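Both sides of the pair above compute their chunked loops with the `divUp` helper (ceiling division), the same idiom used to size grids for a kernel launch. A quick host-only sketch of how it behaves, using a hypothetical element count and block size that are not part of the dataset:

```cpp
#include <cstdio>

// Ceiling division, as defined in the pair above.
template <typename U, typename V>
constexpr auto divUp(U a, V b) -> decltype(a + b) {
  return (a + b - 1) / b;
}

int main() {
  const int numElements = 1000;  // hypothetical problem size
  const int blockSize   = 256;   // hypothetical threads per block
  const int numBlocks   = divUp(numElements, blockSize);
  // 1000 elements at 256 threads per block -> 4 blocks, the last one partially full
  printf("grid = %d blocks of %d threads\n", numBlocks, blockSize);
  return 0;
}
```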
97b54acffd2a66642a4521bed5693a313ff5cdcc.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <ctime>
#include <cfloat>
#include <algorithm>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <map>
#include <memory>
#include <random>
#include <sstream>
#include <string>
#include <vector>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <rocblas.h>
#include <cudnn.h>
#include "read_mnist.h"
#include "common.h"
#include "config.cuh"
#include "layer_hip.cuh"
#include "kernel_hip.cuh"
#include "lenet.cuh"
///////////////////////////////////////////////////////////////////////////////////////////
// Command-line flags
// Application parameters
DEFINE_int32(gpu, 0, "The GPU ID to use");
DEFINE_int32(iterations, 1000, "Number of iterations for training");
DEFINE_int32(random_seed, -1, "Override random seed (default uses std::random_device)");
DEFINE_int32(classify, -1, "Number of images to classify to compute error rate (default uses entire test set)");
// Batch parameters
DEFINE_uint64(batch_size, 64, "Batch size for training");
// Filenames
DEFINE_bool(pretrained, false, "Use the pretrained CUDNN model as input");
DEFINE_bool(save_data, false, "Save pretrained weights to file");
DEFINE_string(train_images, "mnist dataset/train-images-idx3-ubyte", "Training images filename");
DEFINE_string(train_labels, "mnist dataset/train-labels-idx1-ubyte", "Training labels filename");
DEFINE_string(test_images, "mnist dataset/t10k-images-idx3-ubyte", "Test images filename");
DEFINE_string(test_labels, "mnist dataset/t10k-labels-idx1-ubyte", "Test labels filename");
// Solver parameters
DEFINE_double(learning_rate, 0.01, "Base learning rate");
DEFINE_double(lr_gamma, 0.0001, "Learning rate policy gamma");
DEFINE_double(lr_power, 0.75, "Learning rate policy power");
int main(int argc, char **argv)
{
#ifdef USE_GFLAGS
gflags::ParseCommandLineFlags(&argc, &argv, true);
#endif
size_t width, height, channels = 1;
// Open input data
printf("Reading input data\n");
// Read dataset sizes
size_t train_size = readUbyteMnist(FLAGS_train_images.c_str(), FLAGS_train_labels.c_str(), nullptr, nullptr, width, height);
size_t test_size = readUbyteMnist(FLAGS_test_images.c_str(), FLAGS_test_labels.c_str(), nullptr, nullptr, width, height);
if (train_size == 0)
return 1;
std::vector<uint8_t> train_images(train_size * width * height * channels), train_labels(train_size);
std::vector<uint8_t> test_images(test_size * width * height * channels), test_labels(test_size);
// Read data from datasets
if (readUbyteMnist(FLAGS_train_images.c_str(), FLAGS_train_labels.c_str(), &train_images[0], &train_labels[0], width, height) != train_size)
return 2;
if (readUbyteMnist(FLAGS_test_images.c_str(), FLAGS_test_labels.c_str(), &test_images[0], &test_labels[0], width, height) != test_size)
return 3;
printf("Done. Training dataset size: %d, Test dataset size: %d\n", (int)train_size, (int)test_size);
printf("Batch size: %lld, iterations: %d\n", FLAGS_batch_size, FLAGS_iterations);
/*
// This code snippet saves a random image and its label
printf("%d, %d, %d\n", width, height, channels);
std::random_device rd_image;
int random_image = rd_image() % train_size;
std::stringstream ss; ss << "image-" << (int)train_labels[random_image] << ".pgm";
SavePGMFile(&train_images[0]+random_image*width+height, width, height, ss.str().c_str());
*/
// Choose GPU
int num_gpus;
checkCudaErrors(hipGetDeviceCount(&num_gpus));
if (FLAGS_gpu < 0 || FLAGS_gpu >= num_gpus)
{
printf("ERROR: Invalid GPU ID %d (There are %d GPUs on this machine)\n",
FLAGS_gpu, num_gpus);
return 4;
}
// Create the LeNet network architecture
ConvBiasLayer conv1((int)channels, 20, 5, (int)width, (int)height);
MaxPoolLayer pool1(2, 2);
ConvBiasLayer conv2(conv1.out_channels, 50, 5, conv1.out_width / pool1.stride, conv1.out_height / pool1.stride);
MaxPoolLayer pool2(2, 2);
FullyConnectedLayer fc1((conv2.out_channels*conv2.out_width*conv2.out_height) / (pool2.stride * pool2.stride),
500);
FullyConnectedLayer fc2(fc1.outputs, 10);
// Initialize CUDNN/CUBLAS training context
TrainingContext context(FLAGS_gpu, FLAGS_batch_size, conv1, pool1, conv2, pool2, fc1, fc2);
// Determine initial network structure
bool bRet = true;
if (FLAGS_pretrained)
{
bRet = conv1.FromFile("conv1");
bRet &= conv2.FromFile("conv2");
bRet &= fc1.FromFile("ip1");
bRet &= fc2.FromFile("ip2");
}
if (!bRet || !FLAGS_pretrained)
{
// Create random network
std::random_device rd;
std::mt19937 gen(FLAGS_random_seed < 0 ? rd() : static_cast<unsigned int>(FLAGS_random_seed)); // random generate algorithm
// Xavier weight filling
float wconv1 = sqrt(3.0f / (conv1.kernel_size * conv1.kernel_size * conv1.in_channels));
std::uniform_real_distribution<> dconv1(-wconv1, wconv1);
float wconv2 = sqrt(3.0f / (conv2.kernel_size * conv2.kernel_size * conv2.in_channels));
std::uniform_real_distribution<> dconv2(-wconv2, wconv2);
float wfc1 = sqrt(3.0f / (fc1.inputs * fc1.outputs));
std::uniform_real_distribution<> dfc1(-wfc1, wfc1);
float wfc2 = sqrt(3.0f / (fc2.inputs * fc2.outputs));
std::uniform_real_distribution<> dfc2(-wfc2, wfc2);
// Randomize network
for (auto&& iter : conv1.kernel)
iter = static_cast<float>(dconv1(gen));
for (auto&& iter : conv1.bias)
iter = static_cast<float>(dconv1(gen));
for (auto&& iter : conv2.kernel)
iter = static_cast<float>(dconv2(gen));
for (auto&& iter : conv2.bias)
iter = static_cast<float>(dconv2(gen));
for (auto&& iter : fc1.weight)
iter = static_cast<float>(dfc1(gen));
for (auto&& iter : fc1.bias)
iter = static_cast<float>(dfc1(gen));
for (auto&& iter : fc2.weight)
iter = static_cast<float>(dfc2(gen));
for (auto&& iter : fc2.bias)
iter = static_cast<float>(dfc2(gen));
}
/////////////////////////////////////////////////////////////////////////////
// Create GPU data structures
// Forward propagation data
//float *d_data, *d_labels, *d_conv1, *d_pool1, *d_conv2, *d_pool2, *d_fc1, *d_fc1relu, *d_fc2, *d_fc2smax;
float *input_data, *input_labels, *conv1_data, *conv1relu_data,
*pool1_data, *conv2_data, *conv2relu_data, *pool2_data, *fc1_data, *fc1relu_data, *fc2_data, *softmax_data;
// Buffer | Element | N | C | H | W
//-----------------------------------------------------------------------------------------------------------------------------------------
checkCudaErrors(hipMalloc(&input_data, sizeof(float) * context.m_batchSize * channels * height * width));
checkCudaErrors(hipMalloc(&input_labels, sizeof(float) * context.m_batchSize * 1 * 1 * 1));
checkCudaErrors(hipMalloc(&conv1_data, sizeof(float) * context.m_batchSize * conv1.out_channels * conv1.out_height * conv1.out_width));
checkCudaErrors(hipMalloc(&conv1relu_data, sizeof(float) * context.m_batchSize * conv1.out_channels * conv1.out_height * conv1.out_width));
checkCudaErrors(hipMalloc(&pool1_data, sizeof(float) * context.m_batchSize * conv1.out_channels * (conv1.out_height / pool1.stride) * (conv1.out_width / pool1.stride)));
checkCudaErrors(hipMalloc(&conv2_data, sizeof(float) * context.m_batchSize * conv2.out_channels * conv2.out_height * conv2.out_width));
checkCudaErrors(hipMalloc(&conv2relu_data, sizeof(float) * context.m_batchSize * conv2.out_channels * conv2.out_height * conv2.out_width));
checkCudaErrors(hipMalloc(&pool2_data, sizeof(float) * context.m_batchSize * conv2.out_channels * (conv2.out_height / pool2.stride) * (conv2.out_width / pool2.stride)));
checkCudaErrors(hipMalloc(&fc1_data, sizeof(float) * context.m_batchSize * fc1.outputs));
checkCudaErrors(hipMalloc(&fc1relu_data, sizeof(float) * context.m_batchSize * fc1.outputs));
checkCudaErrors(hipMalloc(&fc2_data, sizeof(float) * context.m_batchSize * fc2.outputs));
checkCudaErrors(hipMalloc(&softmax_data, sizeof(float) * context.m_batchSize * fc2.outputs));
// Network parameters
//float *d_pconv1, *d_pconv1bias, *d_pconv2, *d_pconv2bias;
//float *d_pfc1, *d_pfc1bias, *d_pfc2, *d_pfc2bias;
float *conv1_kernel, *conv1_bias, *conv2_kernel, *conv2_bias;
float *fc1_weight, *fc1_bias, *fc2_weight, *fc2_bias;
checkCudaErrors(hipMalloc(&conv1_kernel, sizeof(float) * conv1.kernel.size()));
checkCudaErrors(hipMalloc(&conv1_bias, sizeof(float) * conv1.bias.size()));
checkCudaErrors(hipMalloc(&conv2_kernel, sizeof(float) * conv2.kernel.size()));
checkCudaErrors(hipMalloc(&conv2_bias, sizeof(float) * conv2.bias.size()));
checkCudaErrors(hipMalloc(&fc1_weight, sizeof(float) * fc1.weight.size()));
checkCudaErrors(hipMalloc(&fc1_bias, sizeof(float) * fc1.bias.size()));
checkCudaErrors(hipMalloc(&fc2_weight, sizeof(float) * fc2.weight.size()));
checkCudaErrors(hipMalloc(&fc2_bias, sizeof(float) * fc2.bias.size()));
// Network parameter gradients
//float *d_gconv1, *d_gconv1bias, *d_gconv2, *d_gconv2bias;
//float *d_gfc1, *d_gfc1bias, *d_gfc2, *d_gfc2bias;
float *conv1_kernel_diff, *conv1_bias_diff, *conv2_kernel_diff, *conv2_bias_diff;
float *fc1_weight_diff, *fc1_bias_diff, *fc2_weight_diff, *fc2_bias_diff;
checkCudaErrors(hipMalloc(&conv1_kernel_diff, sizeof(float) * conv1.kernel.size()));
checkCudaErrors(hipMalloc(&conv1_bias_diff, sizeof(float) * conv1.bias.size()));
checkCudaErrors(hipMalloc(&conv2_kernel_diff, sizeof(float) * conv2.kernel.size()));
checkCudaErrors(hipMalloc(&conv2_bias_diff, sizeof(float) * conv2.bias.size()));
checkCudaErrors(hipMalloc(&fc1_weight_diff, sizeof(float) * fc1.weight.size()));
checkCudaErrors(hipMalloc(&fc1_bias_diff, sizeof(float) * fc1.bias.size()));
checkCudaErrors(hipMalloc(&fc2_weight_diff, sizeof(float) * fc2.weight.size()));
checkCudaErrors(hipMalloc(&fc2_bias_diff, sizeof(float) * fc2.bias.size()));
// Differentials w.r.t. data
//float *d_dpool1, *d_dpool2, *d_dconv2, *d_dfc1, *d_dfc1relu, *d_dfc2, *d_dfc2smax, *d_dlossdata;
float *conv1relu_diff, *pool1_diff, *conv2_diff, *conv2relu_diff, *pool2_diff, *fc1_diff, *fc1relu_diff, *fc2_diff, *softmax_diff, *loss;
// Buffer | Element | N | C | H | W
//-----------------------------------------------------------------------------------------------------------------------------------------
checkCudaErrors(hipMalloc(&conv1relu_diff, sizeof(float) * context.m_batchSize * conv1.out_channels * conv1.out_height * conv1.out_width));
checkCudaErrors(hipMalloc(&pool1_diff, sizeof(float) * context.m_batchSize * conv1.out_channels * conv1.out_height * conv1.out_width));
checkCudaErrors(hipMalloc(&conv2_diff, sizeof(float) * context.m_batchSize * conv1.out_channels * (conv1.out_height / pool1.stride) * (conv1.out_width / pool1.stride)));
checkCudaErrors(hipMalloc(&conv2relu_diff, sizeof(float) * context.m_batchSize * conv2.out_channels * conv2.out_height * conv2.out_width));
checkCudaErrors(hipMalloc(&pool2_diff, sizeof(float) * context.m_batchSize * conv2.out_channels * conv2.out_height * conv2.out_width));
checkCudaErrors(hipMalloc(&fc1_diff, sizeof(float) * context.m_batchSize * fc1.inputs));
checkCudaErrors(hipMalloc(&fc1relu_diff, sizeof(float) * context.m_batchSize * fc1.outputs));
checkCudaErrors(hipMalloc(&fc2_diff, sizeof(float) * context.m_batchSize * fc2.inputs));
checkCudaErrors(hipMalloc(&softmax_diff, sizeof(float) * context.m_batchSize * fc2.outputs));
checkCudaErrors(hipMalloc(&loss, sizeof(float) * context.m_batchSize * fc2.outputs));
// Temporary buffers and workspaces
float *d_onevec;
void *d_cudnn_workspace = nullptr;
checkCudaErrors(hipMalloc(&d_onevec, sizeof(float)* context.m_batchSize));
if (context.m_workspaceSize > 0)
checkCudaErrors(hipMalloc(&d_cudnn_workspace, context.m_workspaceSize));
/////////////////////////////////////////////////////////////////////////////
// Copy initial network to device
checkCudaErrors(hipMemcpyAsync(conv1_kernel, &conv1.kernel[0], sizeof(float) * conv1.kernel.size(), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyAsync(conv1_bias, &conv1.bias[0], sizeof(float) * conv1.bias.size(), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyAsync(conv2_kernel, &conv2.kernel[0], sizeof(float) * conv2.kernel.size(), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyAsync(conv2_bias, &conv2.bias[0], sizeof(float) * conv2.bias.size(), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyAsync(fc1_weight, &fc1.weight[0], sizeof(float) * fc1.weight.size(), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyAsync(fc1_bias, &fc1.bias[0], sizeof(float) * fc1.bias.size(), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyAsync(fc2_weight, &fc2.weight[0], sizeof(float) * fc2.weight.size(), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyAsync(fc2_bias, &fc2.bias[0], sizeof(float) * fc2.bias.size(), hipMemcpyHostToDevice));
// Fill one-vector with ones
FillOnes << <RoundUp(context.m_batchSize, BW), BW >> >(d_onevec, context.m_batchSize);
printf("Preparing dataset\n");
// Normalize training set to be in [0,1]
std::vector<float> train_images_float(train_images.size()), train_labels_float(train_size);
for (size_t i = 0; i < train_size * channels * width * height; ++i)
train_images_float[i] = (float)train_images[i] / 255.0f;
for (size_t i = 0; i < train_size; ++i)
train_labels_float[i] = (float)train_labels[i];
printf("Training...\n");
// Use SGD to train the network
checkCudaErrors(hipDeviceSynchronize());
auto t1 = std::chrono::high_resolution_clock::now();
for (int iter = 0; iter < FLAGS_iterations; ++iter)
{
// Train
int imageid = iter % (train_size / context.m_batchSize);
// Prepare current batch on device
checkCudaErrors(hipMemcpyAsync(input_data, &train_images_float[imageid * context.m_batchSize * width*height*channels],
sizeof(float) * context.m_batchSize * channels * width * height, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyAsync(input_labels, &train_labels_float[imageid * context.m_batchSize],
sizeof(float) * context.m_batchSize, hipMemcpyHostToDevice));
// Forward propagation
context.ForwardPropagation(input_data, conv1_data, conv1relu_data, pool1_data, conv2_data, conv2relu_data, pool2_data,
fc1_data, fc1relu_data, fc2_data, softmax_data,
conv1_kernel, conv1_bias, conv2_kernel, conv2_bias, fc1_weight, fc1_bias, fc2_weight, fc2_bias,
d_cudnn_workspace, d_onevec);
// Backward propagation
context.Backpropagation(conv1, pool1, conv2, pool2,
input_data, input_labels, conv1_data, conv1relu_data, pool1_data, conv2_data, conv2relu_data, pool2_data,
fc1_data, fc1relu_data, fc2_data, softmax_data, loss,
conv1_kernel, conv1_bias, conv2_kernel, conv2_bias, fc1_weight, fc1_bias, fc2_weight, fc2_bias,
conv1_kernel_diff, conv1_bias_diff, conv1relu_diff, pool1_diff, conv2_kernel_diff, conv2_bias_diff, conv2_diff,
conv2relu_diff, pool2_diff, fc1_weight_diff, fc1_bias_diff,
fc1_diff, fc1relu_diff, fc2_weight_diff, fc2_bias_diff, fc2_diff, d_cudnn_workspace, d_onevec);
// Printf train loss
std::vector<float> softmax_vec(context.m_batchSize * fc2.outputs);
// Copy back loss
checkCudaErrors(hipMemcpy(&softmax_vec[0], softmax_data, sizeof(float) * context.m_batchSize * fc2.outputs, hipMemcpyDeviceToHost));
const float* _label = &train_labels_float[imageid * context.m_batchSize];
float num_errors = 0.0;
for (int _i = 0; _i < context.m_batchSize; _i++) {
const float* _softmax = &softmax_vec[0] + _i * fc2.outputs;
int chosen = 0;
for (int id = 1; id < 10; ++id) {
if (_softmax[chosen] < _softmax[id]) chosen = id;
}
if (chosen != _label[_i]) ++num_errors;
}
printf("%d iter, train error: %f\n", iter, num_errors / context.m_batchSize);
// Compute learning rate
float learningRate = static_cast<float>(FLAGS_learning_rate * pow((1.0 + FLAGS_lr_gamma * iter), (-FLAGS_lr_power)));
// Update weights
context.UpdateWeights(learningRate, conv1, conv2,
conv1_kernel, conv1_bias, conv2_kernel, conv2_bias, fc1_weight, fc1_bias, fc2_weight, fc2_bias,
conv1_kernel_diff, conv1_bias_diff, conv2_kernel_diff, conv2_bias_diff,
fc1_weight_diff, fc1_bias_diff, fc2_weight_diff, fc2_bias_diff);
}
checkCudaErrors(hipDeviceSynchronize());
auto t2 = std::chrono::high_resolution_clock::now();
printf("Iteration time: %f ms\n", std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count() / 1000.0f / FLAGS_iterations);
// Save model
if (FLAGS_save_data)
{
// Copy trained weights from GPU to CPU
checkCudaErrors(hipMemcpy(&conv1.kernel[0], conv1_kernel, sizeof(float) * conv1.kernel.size(), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&conv1.bias[0], conv1_bias, sizeof(float) * conv1.bias.size(), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&conv2.kernel[0], conv2_kernel, sizeof(float) * conv2.kernel.size(), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&conv2.bias[0], conv2_bias, sizeof(float) * conv2.bias.size(), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&fc1.weight[0], fc1_weight, sizeof(float) * fc1.weight.size(), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&fc1.bias[0], fc1_bias, sizeof(float) * fc1.bias.size(), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&fc2.weight[0], fc2_weight, sizeof(float) * fc2.weight.size(), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&fc2.bias[0], fc2_bias, sizeof(float) * fc2.bias.size(), hipMemcpyDeviceToHost));
// Now save data
printf("Saving data to file\n");
conv1.ToFile("conv1");
conv2.ToFile("conv2");
fc1.ToFile("ip1");
fc2.ToFile("ip2");
}
float classification_error = 1.0f;
int classifications = FLAGS_classify;
if (classifications < 0)
classifications = (int)test_size;
// Test the resulting neural network's classification
if (classifications > 0)
{
// Initialize a TrainingContext structure for testing (different batch size)
TrainingContext test_context(FLAGS_gpu, 1, conv1, pool1, conv2, pool2, fc1, fc2);
// Ensure correct workspaceSize is allocated for testing
if (context.m_workspaceSize < test_context.m_workspaceSize)
{
checkCudaErrors(hipFree(d_cudnn_workspace));
checkCudaErrors(hipMalloc(&d_cudnn_workspace, test_context.m_workspaceSize));
}
int num_errors = 0;
for (int i = 0; i < classifications; ++i)
{
std::vector<float> data(width * height);
// Normalize image to be in [0,1]
for (int j = 0; j < width * height; ++j)
data[j] = (float)test_images[i * width*height*channels + j] / 255.0f;
checkCudaErrors(hipMemcpyAsync(input_data, &data[0], sizeof(float) * width * height, hipMemcpyHostToDevice));
// Forward propagate test image
test_context.ForwardPropagation(input_data, conv1_data, conv1relu_data, pool1_data, conv2_data, conv2relu_data, pool2_data, fc1_data,
fc1relu_data, fc2_data, softmax_data,
conv1_kernel, conv1_bias, conv2_kernel, conv2_bias, fc1_weight, fc1_bias,
fc2_weight, fc2_bias, d_cudnn_workspace, d_onevec);
// Perform classification
std::vector<float> class_vec(10);
// Copy back result
checkCudaErrors(hipMemcpy(&class_vec[0], softmax_data, sizeof(float) * 10, hipMemcpyDeviceToHost));
// Determine classification according to maximal response
int chosen = 0;
for (int id = 1; id < 10; ++id)
{
if (class_vec[chosen] < class_vec[id]) chosen = id;
}
if (chosen != test_labels[i])
++num_errors;
}
classification_error = (float)num_errors / (float)classifications;
printf("Classification result: %.2f%% error (used %d images)\n", classification_error * 100.0f, (int)classifications);
}
// Free data structures
checkCudaErrors(hipFree(input_data));
checkCudaErrors(hipFree(conv1_data));
checkCudaErrors(hipFree(conv1relu_data));
checkCudaErrors(hipFree(pool1_data));
checkCudaErrors(hipFree(conv2_data));
checkCudaErrors(hipFree(conv2relu_data));
checkCudaErrors(hipFree(pool2_data));
checkCudaErrors(hipFree(fc1_data));
checkCudaErrors(hipFree(fc2_data));
checkCudaErrors(hipFree(conv1_kernel));
checkCudaErrors(hipFree(conv1_bias));
checkCudaErrors(hipFree(conv2_kernel));
checkCudaErrors(hipFree(conv2_bias));
checkCudaErrors(hipFree(fc1_weight));
checkCudaErrors(hipFree(fc1_bias));
checkCudaErrors(hipFree(fc2_weight));
checkCudaErrors(hipFree(fc2_bias));
checkCudaErrors(hipFree(conv1_kernel_diff));
checkCudaErrors(hipFree(conv1_bias_diff));
checkCudaErrors(hipFree(conv2_kernel_diff));
checkCudaErrors(hipFree(conv2_bias_diff));
checkCudaErrors(hipFree(fc1_weight_diff));
checkCudaErrors(hipFree(fc1_bias_diff));
checkCudaErrors(hipFree(fc2_weight_diff));
checkCudaErrors(hipFree(fc2_bias_diff));
checkCudaErrors(hipFree(conv1relu_diff));
checkCudaErrors(hipFree(pool1_diff));
checkCudaErrors(hipFree(conv2_diff));
checkCudaErrors(hipFree(conv2relu_diff));
checkCudaErrors(hipFree(pool2_diff));
checkCudaErrors(hipFree(fc1_diff));
checkCudaErrors(hipFree(fc2_diff));
checkCudaErrors(hipFree(input_labels));
checkCudaErrors(hipFree(loss));
checkCudaErrors(hipFree(d_onevec));
if (d_cudnn_workspace != nullptr)
checkCudaErrors(hipFree(d_cudnn_workspace));
return 0;
}
| 97b54acffd2a66642a4521bed5693a313ff5cdcc.cu |
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <ctime>
#include <cfloat>
#include <algorithm>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <map>
#include <memory>
#include <random>
#include <sstream>
#include <string>
#include <vector>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cublas_v2.h>
#include <cudnn.h>
#include "read_mnist.h"
#include "common.h"
#include "config.cuh"
#include "layer.cuh"
#include "kernel.cuh"
#include "lenet.cuh"
///////////////////////////////////////////////////////////////////////////////////////////
// Command-line flags
// Application parameters
DEFINE_int32(gpu, 0, "The GPU ID to use");
DEFINE_int32(iterations, 1000, "Number of iterations for training");
DEFINE_int32(random_seed, -1, "Override random seed (default uses std::random_device)");
DEFINE_int32(classify, -1, "Number of images to classify to compute error rate (default uses entire test set)");
// Batch parameters
DEFINE_uint64(batch_size, 64, "Batch size for training");
// Filenames
DEFINE_bool(pretrained, false, "Use the pretrained CUDNN model as input");
DEFINE_bool(save_data, false, "Save pretrained weights to file");
DEFINE_string(train_images, "mnist dataset/train-images-idx3-ubyte", "Training images filename");
DEFINE_string(train_labels, "mnist dataset/train-labels-idx1-ubyte", "Training labels filename");
DEFINE_string(test_images, "mnist dataset/t10k-images-idx3-ubyte", "Test images filename");
DEFINE_string(test_labels, "mnist dataset/t10k-labels-idx1-ubyte", "Test labels filename");
// Solver parameters
DEFINE_double(learning_rate, 0.01, "Base learning rate");
DEFINE_double(lr_gamma, 0.0001, "Learning rate policy gamma");
DEFINE_double(lr_power, 0.75, "Learning rate policy power");
int main(int argc, char **argv)
{
#ifdef USE_GFLAGS
gflags::ParseCommandLineFlags(&argc, &argv, true);
#endif
size_t width, height, channels = 1;
// Open input data
printf("Reading input data\n");
// Read dataset sizes
size_t train_size = readUbyteMnist(FLAGS_train_images.c_str(), FLAGS_train_labels.c_str(), nullptr, nullptr, width, height);
size_t test_size = readUbyteMnist(FLAGS_test_images.c_str(), FLAGS_test_labels.c_str(), nullptr, nullptr, width, height);
if (train_size == 0)
return 1;
std::vector<uint8_t> train_images(train_size * width * height * channels), train_labels(train_size);
std::vector<uint8_t> test_images(test_size * width * height * channels), test_labels(test_size);
// Read data from datasets
if (readUbyteMnist(FLAGS_train_images.c_str(), FLAGS_train_labels.c_str(), &train_images[0], &train_labels[0], width, height) != train_size)
return 2;
if (readUbyteMnist(FLAGS_test_images.c_str(), FLAGS_test_labels.c_str(), &test_images[0], &test_labels[0], width, height) != test_size)
return 3;
printf("Done. Training dataset size: %d, Test dataset size: %d\n", (int)train_size, (int)test_size);
printf("Batch size: %lld, iterations: %d\n", FLAGS_batch_size, FLAGS_iterations);
/*
// This code snippet saves a random image and its label
printf("%d, %d, %d\n", width, height, channels);
std::random_device rd_image;
int random_image = rd_image() % train_size;
std::stringstream ss; ss << "image-" << (int)train_labels[random_image] << ".pgm";
SavePGMFile(&train_images[0]+random_image*width+height, width, height, ss.str().c_str());
*/
// Choose GPU
int num_gpus;
checkCudaErrors(cudaGetDeviceCount(&num_gpus));
if (FLAGS_gpu < 0 || FLAGS_gpu >= num_gpus)
{
printf("ERROR: Invalid GPU ID %d (There are %d GPUs on this machine)\n",
FLAGS_gpu, num_gpus);
return 4;
}
// Create the LeNet network architecture
ConvBiasLayer conv1((int)channels, 20, 5, (int)width, (int)height);
MaxPoolLayer pool1(2, 2);
ConvBiasLayer conv2(conv1.out_channels, 50, 5, conv1.out_width / pool1.stride, conv1.out_height / pool1.stride);
MaxPoolLayer pool2(2, 2);
FullyConnectedLayer fc1((conv2.out_channels*conv2.out_width*conv2.out_height) / (pool2.stride * pool2.stride),
500);
FullyConnectedLayer fc2(fc1.outputs, 10);
// Initialize CUDNN/CUBLAS training context
TrainingContext context(FLAGS_gpu, FLAGS_batch_size, conv1, pool1, conv2, pool2, fc1, fc2);
// Determine initial network structure
bool bRet = true;
if (FLAGS_pretrained)
{
bRet = conv1.FromFile("conv1");
bRet &= conv2.FromFile("conv2");
bRet &= fc1.FromFile("ip1");
bRet &= fc2.FromFile("ip2");
}
if (!bRet || !FLAGS_pretrained)
{
// Create random network
std::random_device rd;
std::mt19937 gen(FLAGS_random_seed < 0 ? rd() : static_cast<unsigned int>(FLAGS_random_seed)); // random generate algorithm
// Xavier weight filling
float wconv1 = sqrt(3.0f / (conv1.kernel_size * conv1.kernel_size * conv1.in_channels));
std::uniform_real_distribution<> dconv1(-wconv1, wconv1);
float wconv2 = sqrt(3.0f / (conv2.kernel_size * conv2.kernel_size * conv2.in_channels));
std::uniform_real_distribution<> dconv2(-wconv2, wconv2);
float wfc1 = sqrt(3.0f / (fc1.inputs * fc1.outputs));
std::uniform_real_distribution<> dfc1(-wfc1, wfc1);
float wfc2 = sqrt(3.0f / (fc2.inputs * fc2.outputs));
std::uniform_real_distribution<> dfc2(-wfc2, wfc2);
// Randomize network
for (auto&& iter : conv1.kernel)
iter = static_cast<float>(dconv1(gen));
for (auto&& iter : conv1.bias)
iter = static_cast<float>(dconv1(gen));
for (auto&& iter : conv2.kernel)
iter = static_cast<float>(dconv2(gen));
for (auto&& iter : conv2.bias)
iter = static_cast<float>(dconv2(gen));
for (auto&& iter : fc1.weight)
iter = static_cast<float>(dfc1(gen));
for (auto&& iter : fc1.bias)
iter = static_cast<float>(dfc1(gen));
for (auto&& iter : fc2.weight)
iter = static_cast<float>(dfc2(gen));
for (auto&& iter : fc2.bias)
iter = static_cast<float>(dfc2(gen));
}
/////////////////////////////////////////////////////////////////////////////
// Create GPU data structures
// Forward propagation data
//float *d_data, *d_labels, *d_conv1, *d_pool1, *d_conv2, *d_pool2, *d_fc1, *d_fc1relu, *d_fc2, *d_fc2smax;
float *input_data, *input_labels, *conv1_data, *conv1relu_data,
*pool1_data, *conv2_data, *conv2relu_data, *pool2_data, *fc1_data, *fc1relu_data, *fc2_data, *softmax_data;
// Buffer | Element | N | C | H | W
//-----------------------------------------------------------------------------------------------------------------------------------------
checkCudaErrors(cudaMalloc(&input_data, sizeof(float) * context.m_batchSize * channels * height * width));
checkCudaErrors(cudaMalloc(&input_labels, sizeof(float) * context.m_batchSize * 1 * 1 * 1));
checkCudaErrors(cudaMalloc(&conv1_data, sizeof(float) * context.m_batchSize * conv1.out_channels * conv1.out_height * conv1.out_width));
checkCudaErrors(cudaMalloc(&conv1relu_data, sizeof(float) * context.m_batchSize * conv1.out_channels * conv1.out_height * conv1.out_width));
checkCudaErrors(cudaMalloc(&pool1_data, sizeof(float) * context.m_batchSize * conv1.out_channels * (conv1.out_height / pool1.stride) * (conv1.out_width / pool1.stride)));
checkCudaErrors(cudaMalloc(&conv2_data, sizeof(float) * context.m_batchSize * conv2.out_channels * conv2.out_height * conv2.out_width));
checkCudaErrors(cudaMalloc(&conv2relu_data, sizeof(float) * context.m_batchSize * conv2.out_channels * conv2.out_height * conv2.out_width));
checkCudaErrors(cudaMalloc(&pool2_data, sizeof(float) * context.m_batchSize * conv2.out_channels * (conv2.out_height / pool2.stride) * (conv2.out_width / pool2.stride)));
checkCudaErrors(cudaMalloc(&fc1_data, sizeof(float) * context.m_batchSize * fc1.outputs));
checkCudaErrors(cudaMalloc(&fc1relu_data, sizeof(float) * context.m_batchSize * fc1.outputs));
checkCudaErrors(cudaMalloc(&fc2_data, sizeof(float) * context.m_batchSize * fc2.outputs));
checkCudaErrors(cudaMalloc(&softmax_data, sizeof(float) * context.m_batchSize * fc2.outputs));
// Network parameters
//float *d_pconv1, *d_pconv1bias, *d_pconv2, *d_pconv2bias;
//float *d_pfc1, *d_pfc1bias, *d_pfc2, *d_pfc2bias;
float *conv1_kernel, *conv1_bias, *conv2_kernel, *conv2_bias;
float *fc1_weight, *fc1_bias, *fc2_weight, *fc2_bias;
checkCudaErrors(cudaMalloc(&conv1_kernel, sizeof(float) * conv1.kernel.size()));
checkCudaErrors(cudaMalloc(&conv1_bias, sizeof(float) * conv1.bias.size()));
checkCudaErrors(cudaMalloc(&conv2_kernel, sizeof(float) * conv2.kernel.size()));
checkCudaErrors(cudaMalloc(&conv2_bias, sizeof(float) * conv2.bias.size()));
checkCudaErrors(cudaMalloc(&fc1_weight, sizeof(float) * fc1.weight.size()));
checkCudaErrors(cudaMalloc(&fc1_bias, sizeof(float) * fc1.bias.size()));
checkCudaErrors(cudaMalloc(&fc2_weight, sizeof(float) * fc2.weight.size()));
checkCudaErrors(cudaMalloc(&fc2_bias, sizeof(float) * fc2.bias.size()));
// Network parameter gradients
//float *d_gconv1, *d_gconv1bias, *d_gconv2, *d_gconv2bias;
//float *d_gfc1, *d_gfc1bias, *d_gfc2, *d_gfc2bias;
float *conv1_kernel_diff, *conv1_bias_diff, *conv2_kernel_diff, *conv2_bias_diff;
float *fc1_weight_diff, *fc1_bias_diff, *fc2_weight_diff, *fc2_bias_diff;
checkCudaErrors(cudaMalloc(&conv1_kernel_diff, sizeof(float) * conv1.kernel.size()));
checkCudaErrors(cudaMalloc(&conv1_bias_diff, sizeof(float) * conv1.bias.size()));
checkCudaErrors(cudaMalloc(&conv2_kernel_diff, sizeof(float) * conv2.kernel.size()));
checkCudaErrors(cudaMalloc(&conv2_bias_diff, sizeof(float) * conv2.bias.size()));
checkCudaErrors(cudaMalloc(&fc1_weight_diff, sizeof(float) * fc1.weight.size()));
checkCudaErrors(cudaMalloc(&fc1_bias_diff, sizeof(float) * fc1.bias.size()));
checkCudaErrors(cudaMalloc(&fc2_weight_diff, sizeof(float) * fc2.weight.size()));
checkCudaErrors(cudaMalloc(&fc2_bias_diff, sizeof(float) * fc2.bias.size()));
// Differentials w.r.t. data
//float *d_dpool1, *d_dpool2, *d_dconv2, *d_dfc1, *d_dfc1relu, *d_dfc2, *d_dfc2smax, *d_dlossdata;
float *conv1relu_diff, *pool1_diff, *conv2_diff, *conv2relu_diff, *pool2_diff, *fc1_diff, *fc1relu_diff, *fc2_diff, *softmax_diff, *loss;
// Buffer | Element | N | C | H | W
//-----------------------------------------------------------------------------------------------------------------------------------------
checkCudaErrors(cudaMalloc(&conv1relu_diff, sizeof(float) * context.m_batchSize * conv1.out_channels * conv1.out_height * conv1.out_width));
checkCudaErrors(cudaMalloc(&pool1_diff, sizeof(float) * context.m_batchSize * conv1.out_channels * conv1.out_height * conv1.out_width));
checkCudaErrors(cudaMalloc(&conv2_diff, sizeof(float) * context.m_batchSize * conv1.out_channels * (conv1.out_height / pool1.stride) * (conv1.out_width / pool1.stride)));
checkCudaErrors(cudaMalloc(&conv2relu_diff, sizeof(float) * context.m_batchSize * conv2.out_channels * conv2.out_height * conv2.out_width));
checkCudaErrors(cudaMalloc(&pool2_diff, sizeof(float) * context.m_batchSize * conv2.out_channels * conv2.out_height * conv2.out_width));
checkCudaErrors(cudaMalloc(&fc1_diff, sizeof(float) * context.m_batchSize * fc1.inputs));
checkCudaErrors(cudaMalloc(&fc1relu_diff, sizeof(float) * context.m_batchSize * fc1.outputs));
checkCudaErrors(cudaMalloc(&fc2_diff, sizeof(float) * context.m_batchSize * fc2.inputs));
checkCudaErrors(cudaMalloc(&softmax_diff, sizeof(float) * context.m_batchSize * fc2.outputs));
checkCudaErrors(cudaMalloc(&loss, sizeof(float) * context.m_batchSize * fc2.outputs));
// Temporary buffers and workspaces
float *d_onevec;
void *d_cudnn_workspace = nullptr;
checkCudaErrors(cudaMalloc(&d_onevec, sizeof(float)* context.m_batchSize));
if (context.m_workspaceSize > 0)
checkCudaErrors(cudaMalloc(&d_cudnn_workspace, context.m_workspaceSize));
/////////////////////////////////////////////////////////////////////////////
// Copy initial network to device
checkCudaErrors(cudaMemcpyAsync(conv1_kernel, &conv1.kernel[0], sizeof(float) * conv1.kernel.size(), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyAsync(conv1_bias, &conv1.bias[0], sizeof(float) * conv1.bias.size(), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyAsync(conv2_kernel, &conv2.kernel[0], sizeof(float) * conv2.kernel.size(), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyAsync(conv2_bias, &conv2.bias[0], sizeof(float) * conv2.bias.size(), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyAsync(fc1_weight, &fc1.weight[0], sizeof(float) * fc1.weight.size(), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyAsync(fc1_bias, &fc1.bias[0], sizeof(float) * fc1.bias.size(), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyAsync(fc2_weight, &fc2.weight[0], sizeof(float) * fc2.weight.size(), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyAsync(fc2_bias, &fc2.bias[0], sizeof(float) * fc2.bias.size(), cudaMemcpyHostToDevice));
// Fill one-vector with ones
FillOnes << <RoundUp(context.m_batchSize, BW), BW >> >(d_onevec, context.m_batchSize);
printf("Preparing dataset\n");
// Normalize training set to be in [0,1]
std::vector<float> train_images_float(train_images.size()), train_labels_float(train_size);
for (size_t i = 0; i < train_size * channels * width * height; ++i)
train_images_float[i] = (float)train_images[i] / 255.0f;
for (size_t i = 0; i < train_size; ++i)
train_labels_float[i] = (float)train_labels[i];
printf("Training...\n");
// Use SGD to train the network
checkCudaErrors(cudaDeviceSynchronize());
auto t1 = std::chrono::high_resolution_clock::now();
for (int iter = 0; iter < FLAGS_iterations; ++iter)
{
// Train
int imageid = iter % (train_size / context.m_batchSize);
// Prepare current batch on device
checkCudaErrors(cudaMemcpyAsync(input_data, &train_images_float[imageid * context.m_batchSize * width*height*channels],
sizeof(float) * context.m_batchSize * channels * width * height, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyAsync(input_labels, &train_labels_float[imageid * context.m_batchSize],
sizeof(float) * context.m_batchSize, cudaMemcpyHostToDevice));
// Forward propagation
context.ForwardPropagation(input_data, conv1_data, conv1relu_data, pool1_data, conv2_data, conv2relu_data, pool2_data,
fc1_data, fc1relu_data, fc2_data, softmax_data,
conv1_kernel, conv1_bias, conv2_kernel, conv2_bias, fc1_weight, fc1_bias, fc2_weight, fc2_bias,
d_cudnn_workspace, d_onevec);
// Backward propagation
context.Backpropagation(conv1, pool1, conv2, pool2,
input_data, input_labels, conv1_data, conv1relu_data, pool1_data, conv2_data, conv2relu_data, pool2_data,
fc1_data, fc1relu_data, fc2_data, softmax_data, loss,
conv1_kernel, conv1_bias, conv2_kernel, conv2_bias, fc1_weight, fc1_bias, fc2_weight, fc2_bias,
conv1_kernel_diff, conv1_bias_diff, conv1relu_diff, pool1_diff, conv2_kernel_diff, conv2_bias_diff, conv2_diff,
conv2relu_diff, pool2_diff, fc1_weight_diff, fc1_bias_diff,
fc1_diff, fc1relu_diff, fc2_weight_diff, fc2_bias_diff, fc2_diff, d_cudnn_workspace, d_onevec);
// Printf train loss
std::vector<float> softmax_vec(context.m_batchSize * fc2.outputs);
// Copy back loss
checkCudaErrors(cudaMemcpy(&softmax_vec[0], softmax_data, sizeof(float) * context.m_batchSize * fc2.outputs, cudaMemcpyDeviceToHost));
const float* _label = &train_labels_float[imageid * context.m_batchSize];
float num_errors = 0.0;
for (int _i = 0; _i < context.m_batchSize; _i++) {
const float* _softmax = &softmax_vec[0] + _i * fc2.outputs;
int chosen = 0;
for (int id = 1; id < 10; ++id) {
if (_softmax[chosen] < _softmax[id]) chosen = id;
}
if (chosen != _label[_i]) ++num_errors;
}
printf("%d iter, train error: %f\n", iter, num_errors / context.m_batchSize);
// Compute learning rate
float learningRate = static_cast<float>(FLAGS_learning_rate * pow((1.0 + FLAGS_lr_gamma * iter), (-FLAGS_lr_power)));
// Update weights
context.UpdateWeights(learningRate, conv1, conv2,
conv1_kernel, conv1_bias, conv2_kernel, conv2_bias, fc1_weight, fc1_bias, fc2_weight, fc2_bias,
conv1_kernel_diff, conv1_bias_diff, conv2_kernel_diff, conv2_bias_diff,
fc1_weight_diff, fc1_bias_diff, fc2_weight_diff, fc2_bias_diff);
}
checkCudaErrors(cudaDeviceSynchronize());
auto t2 = std::chrono::high_resolution_clock::now();
printf("Iteration time: %f ms\n", std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count() / 1000.0f / FLAGS_iterations);
// Save model
if (FLAGS_save_data)
{
// Copy trained weights from GPU to CPU
checkCudaErrors(cudaMemcpy(&conv1.kernel[0], conv1_kernel, sizeof(float) * conv1.kernel.size(), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&conv1.bias[0], conv1_bias, sizeof(float) * conv1.bias.size(), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&conv2.kernel[0], conv2_kernel, sizeof(float) * conv2.kernel.size(), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&conv2.bias[0], conv2_bias, sizeof(float) * conv2.bias.size(), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&fc1.weight[0], fc1_weight, sizeof(float) * fc1.weight.size(), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&fc1.bias[0], fc1_bias, sizeof(float) * fc1.bias.size(), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&fc2.weight[0], fc2_weight, sizeof(float) * fc2.weight.size(), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&fc2.bias[0], fc2_bias, sizeof(float) * fc2.bias.size(), cudaMemcpyDeviceToHost));
// Now save data
printf("Saving data to file\n");
conv1.ToFile("conv1");
conv2.ToFile("conv2");
fc1.ToFile("ip1");
fc2.ToFile("ip2");
}
float classification_error = 1.0f;
int classifications = FLAGS_classify;
if (classifications < 0)
classifications = (int)test_size;
// Test the resulting neural network's classification
if (classifications > 0)
{
// Initialize a TrainingContext structure for testing (different batch size)
TrainingContext test_context(FLAGS_gpu, 1, conv1, pool1, conv2, pool2, fc1, fc2);
// Ensure correct workspaceSize is allocated for testing
if (context.m_workspaceSize < test_context.m_workspaceSize)
{
checkCudaErrors(cudaFree(d_cudnn_workspace));
checkCudaErrors(cudaMalloc(&d_cudnn_workspace, test_context.m_workspaceSize));
}
int num_errors = 0;
for (int i = 0; i < classifications; ++i)
{
std::vector<float> data(width * height);
// Normalize image to be in [0,1]
for (int j = 0; j < width * height; ++j)
data[j] = (float)test_images[i * width*height*channels + j] / 255.0f;
checkCudaErrors(cudaMemcpyAsync(input_data, &data[0], sizeof(float) * width * height, cudaMemcpyHostToDevice));
// Forward propagate test image
test_context.ForwardPropagation(input_data, conv1_data, conv1relu_data, pool1_data, conv2_data, conv2relu_data, pool2_data, fc1_data,
fc1relu_data, fc2_data, softmax_data,
conv1_kernel, conv1_bias, conv2_kernel, conv2_bias, fc1_weight, fc1_bias,
fc2_weight, fc2_bias, d_cudnn_workspace, d_onevec);
// Perform classification
std::vector<float> class_vec(10);
// Copy back result
checkCudaErrors(cudaMemcpy(&class_vec[0], softmax_data, sizeof(float) * 10, cudaMemcpyDeviceToHost));
// Determine classification according to maximal response
int chosen = 0;
for (int id = 1; id < 10; ++id)
{
if (class_vec[chosen] < class_vec[id]) chosen = id;
}
if (chosen != test_labels[i])
++num_errors;
}
classification_error = (float)num_errors / (float)classifications;
printf("Classification result: %.2f%% error (used %d images)\n", classification_error * 100.0f, (int)classifications);
}
// Free data structures
checkCudaErrors(cudaFree(input_data));
checkCudaErrors(cudaFree(conv1_data));
checkCudaErrors(cudaFree(conv1relu_data));
checkCudaErrors(cudaFree(pool1_data));
checkCudaErrors(cudaFree(conv2_data));
checkCudaErrors(cudaFree(conv2relu_data));
checkCudaErrors(cudaFree(pool2_data));
checkCudaErrors(cudaFree(fc1_data));
checkCudaErrors(cudaFree(fc2_data));
checkCudaErrors(cudaFree(conv1_kernel));
checkCudaErrors(cudaFree(conv1_bias));
checkCudaErrors(cudaFree(conv2_kernel));
checkCudaErrors(cudaFree(conv2_bias));
checkCudaErrors(cudaFree(fc1_weight));
checkCudaErrors(cudaFree(fc1_bias));
checkCudaErrors(cudaFree(fc2_weight));
checkCudaErrors(cudaFree(fc2_bias));
checkCudaErrors(cudaFree(conv1_kernel_diff));
checkCudaErrors(cudaFree(conv1_bias_diff));
checkCudaErrors(cudaFree(conv2_kernel_diff));
checkCudaErrors(cudaFree(conv2_bias_diff));
checkCudaErrors(cudaFree(fc1_weight_diff));
checkCudaErrors(cudaFree(fc1_bias_diff));
checkCudaErrors(cudaFree(fc2_weight_diff));
checkCudaErrors(cudaFree(fc2_bias_diff));
checkCudaErrors(cudaFree(conv1relu_diff));
checkCudaErrors(cudaFree(pool1_diff));
checkCudaErrors(cudaFree(conv2_diff));
checkCudaErrors(cudaFree(conv2relu_diff));
checkCudaErrors(cudaFree(pool2_diff));
checkCudaErrors(cudaFree(fc1_diff));
checkCudaErrors(cudaFree(fc2_diff));
checkCudaErrors(cudaFree(input_labels));
checkCudaErrors(cudaFree(loss));
checkCudaErrors(cudaFree(d_onevec));
if (d_cudnn_workspace != nullptr)
checkCudaErrors(cudaFree(d_cudnn_workspace));
return 0;
}
|
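Throughout the pair above, every runtime call is wrapped in `checkCudaErrors` while hipify rewrites the calls themselves (`cudaMalloc` to `hipMalloc`, `cudaMemcpyAsync` to `hipMemcpyAsync`) and the includes (`cuda_runtime.h`/`cublas_v2.h` to `hip/hip_runtime.h`/`rocblas.h`). The sketch below shows that allocate/copy/free lifecycle with a simple error-checking macro; the macro name `CHECK_HIP` is illustrative, and the dataset file gets its `checkCudaErrors` from its own headers (`common.h`), which are not shown here.

```cpp
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <vector>

// Simple error-checking wrapper; the dataset's checkCudaErrors plays the same role.
#define CHECK_HIP(call)                                                     \
  do {                                                                      \
    hipError_t err = (call);                                                \
    if (err != hipSuccess) {                                                \
      fprintf(stderr, "HIP error %s at %s:%d\n", hipGetErrorString(err),    \
              __FILE__, __LINE__);                                          \
      std::exit(1);                                                         \
    }                                                                       \
  } while (0)

int main() {
  const size_t n = 1024;
  std::vector<float> host(n, 1.0f);
  float *device = nullptr;

  CHECK_HIP(hipMalloc((void**)&device, n * sizeof(float)));   // allocate on the GPU
  CHECK_HIP(hipMemcpy(device, host.data(), n * sizeof(float),
                      hipMemcpyHostToDevice));                 // upload
  CHECK_HIP(hipMemcpy(host.data(), device, n * sizeof(float),
                      hipMemcpyDeviceToHost));                 // download
  CHECK_HIP(hipFree(device));                                  // release
  printf("round trip ok, first element = %f\n", host[0]);
  return 0;
}
```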
d41c7e26c0feb65f857c65d76c4a4bc7b3576505.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020 NVIDIA Corporation.
* Copyright (c) 2018-2020 Chris Choy ([email protected]).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#ifndef GPU_POOLING_AVG
#define GPU_POOLING_AVG
#include <hipsparse.h>
#include <limits>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <thrust/functional.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include "allocators.cuh"
#include "pooling_avg_kernel.cuh"
#include "utils.hpp"
namespace minkowski {
template <typename Dtype>
__global__ void fill(const int n, Dtype *in_feat, Dtype val) {
CUDA_KERNEL_LOOP(index, n) { in_feat[index] = val; }
}
template <typename Dtype>
__global__ void col2row_major(const int n, const int nrows, const int ncols,
const Dtype *colA, Dtype *rowA) {
int i, j;
CUDA_KERNEL_LOOP(index, n) {
i = index % nrows;
j = index / nrows;
rowA[i * ncols + j] = colA[index];
}
}
template <typename Dtype>
__global__ void col2row_major_with_div(const int n, const int nrows,
const int ncols,
const Dtype *num_nonzero,
const Dtype *colA, Dtype *rowA) {
int i, j;
CUDA_KERNEL_LOOP(index, n) {
i = index % nrows;
j = index / nrows;
if (num_nonzero[i] >= 1) {
rowA[i * ncols + j] = colA[index] / num_nonzero[i];
} else {
rowA[i * ncols + j] = colA[index];
}
}
}
template <typename Itype, typename Dtype>
__global__ void
unique_row2num_nonzero(const int n, Dtype *__restrict__ d_num_nonzero,
const Itype *__restrict__ unique_row_ptr,
const Dtype *__restrict__ reduced_val_ptr) {
CUDA_KERNEL_LOOP(index, n) {
d_num_nonzero[unique_row_ptr[index]] = reduced_val_ptr[index];
}
}
template <typename Dtype, typename Itype>
__global__ void set_gradient(const int n, const Dtype *d_grad_out,
Dtype *d_grad_in, const Itype *out_index,
int nchannel) {
CUDA_KERNEL_LOOP(index, n) {
atomicAdd(&d_grad_in[out_index[index]], d_grad_out[index]);
}
}
template <typename Dtype, typename Itype>
__global__ void
set_gradient_nonzero(const int n, const Dtype *d_grad_out, Dtype *d_grad_in,
int nchannel, const Itype *in_map, const Itype *out_map) {
CUDA_KERNEL_LOOP(index, n) {
int nrow = index / nchannel;
int ch = index % nchannel;
atomicAdd(&d_grad_in[in_map[nrow] * nchannel + ch],
d_grad_out[out_map[nrow] * nchannel + ch]);
}
}
template <typename Dtype, typename Itype>
__global__ void
set_gradient_nonzero_avg(const int n, const Dtype *d_grad_out, Dtype *d_grad_in,
int nchannel, const Dtype *d_num_nonzero,
const Itype *in_map, const Itype *out_map) {
CUDA_KERNEL_LOOP(index, n) {
int nrow = index / nchannel;
int ch = index % nchannel;
int curr_num_nonzero = d_num_nonzero[out_map[nrow]];
if (curr_num_nonzero >= 1)
atomicAdd(&d_grad_in[in_map[nrow] * nchannel + ch],
d_grad_out[out_map[nrow] * nchannel + ch] / curr_num_nonzero);
}
}
template <typename Dtype, typename Itype, typename ByteAllocator>
void NonzeroAvgPoolingForwardKernelGPU(
Dtype const *d_in_feat, //
default_types::size_type const in_nrows, //
Dtype *d_out_feat, //
default_types::size_type const out_nrows, //
Dtype *d_num_nonzero, //
default_types::size_type const nchannel, //
gpu_kernel_map<Itype, ByteAllocator> const &kernel_map, //
bool const use_avg, //
ByteAllocator &allocator, //
hipsparseHandle_t cushandle, hipStream_t stream) {
const Dtype alpha = 1;
const Dtype beta = 0;
static_assert(sizeof(Itype) == sizeof(int),
"cusparse requires int type index");
Dtype *d_ones, *d_coo_val, *d_tmp_out_feat;
constexpr bool is_int32 = sizeof(Itype) == sizeof(int32_t);
constexpr bool is_int64 = sizeof(Itype) == sizeof(int64_t);
constexpr bool is_float32 = std::is_same<Dtype, float>::value;
hipDataType cuda_data_type = is_float32 ? HIP_R_32F : HIP_R_64F;
hipsparseSpMMAlg_t mm_alg;
#if defined(CUDART_VERSION) && (CUDART_VERSION < 10010)
ASSERT(false, "spmm sparse-dense requires CUDA 10.1 or greater");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010) && \
(CUDART_VERSION < 11000)
mm_alg = HIPSPARSE_COOMM_ALG1;
static_assert(is_int32, "int64 hipsparseSpMM requires CUDA 11.1 or greater");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 11000)
mm_alg = HIPSPARSE_SPMM_COO_ALG1;
static_assert(is_int32 || is_int64, "Invalid index type");
#endif
/* sparse mm prep */
size_t const sparse_nnzs =
kernel_map.in_maps.end() - kernel_map.in_maps.begin();
static_assert(is_int32, "sort_coo supports int32");
sort_coo_gpu<ByteAllocator>(cushandle, out_nrows, in_nrows, sparse_nnzs,
(int *)kernel_map.out_maps.begin(),
(int *)kernel_map.in_maps.begin(), allocator);
// feature output
d_tmp_out_feat =
(Dtype *)allocator.allocate(nchannel * out_nrows * sizeof(Dtype));
d_coo_val = (Dtype *)allocator.allocate(sparse_nnzs * sizeof(Dtype));
hipLaunchKernelGGL(( fill<Dtype>), dim3(GET_BLOCKS(sparse_nnzs, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0,
stream, sparse_nnzs, d_coo_val, (Dtype)1.);
if (use_avg) {
d_ones = (Dtype *)allocator.allocate(sparse_nnzs * sizeof(Dtype));
hipLaunchKernelGGL(( fill<Dtype>), dim3(GET_BLOCKS(sparse_nnzs, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS),
0, stream, sparse_nnzs, d_ones, (Dtype)1.);
}
#ifdef DEBUG
std::cout << "sparse_nnzs: " << sparse_nnzs << "\n";
Itype *p_scr = (Itype *)std::malloc((sparse_nnzs)*2 * sizeof(Itype));
CUDA_CHECK(hipMemcpy(p_scr, kernel_map.out_maps.begin(),
sparse_nnzs * sizeof(Itype), hipMemcpyDeviceToHost));
CUDA_CHECK(hipMemcpy(p_scr + sparse_nnzs, kernel_map.in_maps.begin(),
sparse_nnzs * sizeof(Itype), hipMemcpyDeviceToHost));
Itype step = std::max<Itype>(sparse_nnzs / 100, 1);
Itype i = 0;
for (; i < sparse_nnzs;) {
std::cout << i;
std::cout << " out_map: " << p_scr[i]
<< ", in_map: " << p_scr[i + sparse_nnzs] << "\n";
i += step;
}
i -= step;
for (; i < sparse_nnzs; ++i) {
std::cout << i;
std::cout << " out_map: " << p_scr[i]
<< ", in_map: " << p_scr[i + sparse_nnzs] << "\n";
}
std::free(p_scr);
std::cout << "done printing\n";
#endif
Itype *sorted_row_ptr =
(Itype *)allocator.allocate(2 * (sparse_nnzs + 1) * sizeof(Itype));
Itype *sorted_col_ptr = sorted_row_ptr + sparse_nnzs + 1;
CUDA_CHECK(hipMemcpy(sorted_row_ptr, kernel_map.out_maps.begin(),
sparse_nnzs * sizeof(Itype), hipMemcpyDeviceToDevice));
CUDA_CHECK(hipMemcpy(sorted_col_ptr, kernel_map.in_maps.begin(),
sparse_nnzs * sizeof(Itype), hipMemcpyDeviceToDevice));
thrust::sort_by_key(thrust::device, //
sorted_row_ptr, // key begin
sorted_row_ptr + sparse_nnzs, // key end
sorted_col_ptr);
//   +---------+     +---+
//   |   spm   |     | i |
//   +---------+     | n |
//    in_nrows       |   |
//                   | F |
//                   |   |
//                   +---+
//                   nchannel
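// Put differently (sketch of the intended product, not extra code): spm is the
// out_nrows x in_nrows COO matrix built from the kernel map, d_in_feat is an
// in_nrows x nchannel row-major buffer that is described to hipsparseSpMM as an
// nchannel x in_nrows column-major matrix and multiplied with
// HIPSPARSE_OPERATION_TRANSPOSE, so d_tmp_out_feat = spm * in_feat comes out
// as out_nrows x nchannel in column-major order and is converted back by
// col2row_major / col2row_major_with_div afterwards.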
size_t dim_i = out_nrows, dim_j = in_nrows, dim_k = nchannel;
hipsparseSpMatDescr_t sparse_descr;
hipsparseDnMatDescr_t dense_descr;
hipsparseDnMatDescr_t result_descr;
CUSPARSE_CHECK(
hipsparseCreateCoo(&sparse_descr, //
dim_i, dim_j, sparse_nnzs, //
sorted_row_ptr, // rows
sorted_col_ptr, // cols
d_coo_val, // coo vals
is_int32 ? HIPSPARSE_INDEX_32I : HIPSPARSE_INDEX_64I,
HIPSPARSE_INDEX_BASE_ZERO, cuda_data_type));
CUSPARSE_CHECK(hipsparseCreateDnMat(&dense_descr, //
dim_k, dim_j, dim_k, //
(void *)d_in_feat, //
cuda_data_type, HIPSPARSE_ORDER_COL));
CUSPARSE_CHECK(hipsparseCreateDnMat(&result_descr, //
dim_i, dim_k, dim_i, //
(void *)d_tmp_out_feat, //
cuda_data_type, HIPSPARSE_ORDER_COL));
size_t buffer_size = 0;
CUSPARSE_CHECK(hipsparseSpMM_bufferSize(
cushandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_TRANSPOSE,
(void *)&alpha, sparse_descr, dense_descr, (void *)&beta, result_descr,
cuda_data_type, mm_alg, &buffer_size));
// buffer size 0 for HIPSPARSE_SPMM_COO_ALG1, CUSPARSE_SPMM_COO_ALG3,
// CUSPARSE_SPMM_COO_ALG4, and HIPSPARSE_CSRMM_ALG1
// WARNING: coo sorting must have been handled in the kernel map
// decomposition.
CUSPARSE_CHECK(hipsparseSpMM(cushandle, //
HIPSPARSE_OPERATION_NON_TRANSPOSE, //
HIPSPARSE_OPERATION_TRANSPOSE, //
(void *)&alpha, //
sparse_descr, dense_descr, //
(void *)&beta, result_descr, //
cuda_data_type, mm_alg, &buffer_size));
#ifdef DEBUG
CUDA_CHECK(hipStreamSynchronize(0));
#endif
LOG_DEBUG("SPMM");
if (use_avg) {
Itype *unique_row_ptr =
(Itype *)allocator.allocate(sparse_nnzs * sizeof(Itype));
Dtype *reduced_val_ptr =
(Dtype *)allocator.allocate(sparse_nnzs * sizeof(Dtype));
// reduce by key
auto end = thrust::reduce_by_key(thrust::device, // policy
sorted_row_ptr, // key begin
sorted_row_ptr + sparse_nnzs, // key end
d_ones, // value begin
unique_row_ptr, // key out begin
reduced_val_ptr // value out begin
);
int num_unique_keys = end.first - unique_row_ptr;
LOG_DEBUG("Num unique keys:", num_unique_keys);
#ifdef DEBUG
Itype *p_unique_row = (Itype *)std::malloc(num_unique_keys * sizeof(Itype));
CUDA_CHECK(hipMemcpy(p_unique_row, unique_row_ptr,
num_unique_keys * sizeof(Itype),
hipMemcpyDeviceToHost));
std::cout << "[" << PtrToString(p_unique_row, num_unique_keys) << "]\n";
std::free(p_unique_row);
Dtype *p_reduced_val =
(Dtype *)std::malloc(num_unique_keys * sizeof(Dtype));
CUDA_CHECK(hipMemcpy(p_reduced_val, reduced_val_ptr,
num_unique_keys * sizeof(Dtype),
hipMemcpyDeviceToHost));
std::cout << "[" << PtrToString(p_reduced_val, num_unique_keys) << "]\n";
std::free(p_reduced_val);
#endif
// Copy the results to the correct output
hipLaunchKernelGGL(( unique_row2num_nonzero<Itype, Dtype>)
, dim3(GET_BLOCKS(num_unique_keys, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0,
stream, num_unique_keys, d_num_nonzero, unique_row_ptr,
reduced_val_ptr);
hipLaunchKernelGGL(( col2row_major_with_div<Dtype>)
, dim3(GET_BLOCKS(out_nrows * nchannel, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS),
0, stream, out_nrows * nchannel, out_nrows, nchannel,
d_num_nonzero, d_tmp_out_feat, d_out_feat);
#ifdef DEBUG
CUDA_CHECK(hipStreamSynchronize(0));
#endif
LOG_DEBUG("col2row");
// Delete tmp spaces
allocator.deallocate((char *)unique_row_ptr, sparse_nnzs * sizeof(Itype));
allocator.deallocate((char *)reduced_val_ptr, sparse_nnzs * sizeof(Dtype));
} else {
hipLaunchKernelGGL(( col2row_major<Dtype>), dim3(GET_BLOCKS(out_nrows * nchannel, CUDA_NUM_THREADS)),
dim3(CUDA_NUM_THREADS), 0, stream,
out_nrows * nchannel, out_nrows, nchannel, d_tmp_out_feat, d_out_feat);
}
CUSPARSE_CHECK(hipsparseDestroySpMat(sparse_descr));
CUSPARSE_CHECK(hipsparseDestroyDnMat(dense_descr));
CUSPARSE_CHECK(hipsparseDestroyDnMat(result_descr));
allocator.deallocate((char *)d_coo_val, sparse_nnzs * sizeof(Dtype));
allocator.deallocate((char *)d_tmp_out_feat,
nchannel * out_nrows * sizeof(Dtype));
if (use_avg)
allocator.deallocate((char *)d_ones, in_nrows * sizeof(Dtype));
allocator.deallocate((char *)sorted_row_ptr,
2 * (sparse_nnzs + 1) * sizeof(Itype));
CUDA_CHECK(hipStreamSynchronize(0));
}
// default_allocator
template void
NonzeroAvgPoolingForwardKernelGPU<float, uint32_t,
detail::default_allocator<char>>(
float const *d_in_feat, //
default_types::size_type const in_nrows, //
float *d_out_feat, //
default_types::size_type const out_nrows, //
float *d_num_nonzero, //
default_types::size_type const nchannel, //
gpu_kernel_map<uint32_t, detail::default_allocator<char>> const
&kernel_map, //
bool const use_avg,
detail::default_allocator<char> &allocator, //
hipsparseHandle_t cushandle, hipStream_t stream);
template void
NonzeroAvgPoolingForwardKernelGPU<double, uint32_t,
detail::default_allocator<char>>(
double const *d_in_feat, //
default_types::size_type const in_nrows, //
double *d_out_feat, //
default_types::size_type const out_nrows, //
double *d_num_nonzero, //
default_types::size_type const nchannel, //
gpu_kernel_map<uint32_t, detail::default_allocator<char>> const
&kernel_map, //
bool const use_avg,
detail::default_allocator<char> &allocator, //
hipsparseHandle_t cushandle, hipStream_t stream);
// c10_allocator
template void
NonzeroAvgPoolingForwardKernelGPU<float, uint32_t, detail::c10_allocator<char>>(
float const *d_in_feat, //
default_types::size_type const in_nrows, //
float *d_out_feat, //
default_types::size_type const out_nrows, //
float *d_num_nonzero, //
default_types::size_type const nchannel, //
gpu_kernel_map<uint32_t, detail::c10_allocator<char>> const &kernel_map, //
bool const use_avg,
detail::c10_allocator<char> &allocator, //
hipsparseHandle_t cushandle, hipStream_t stream);
template void NonzeroAvgPoolingForwardKernelGPU<double, uint32_t,
detail::c10_allocator<char>>(
double const *d_in_feat, //
default_types::size_type const in_nrows, //
double *d_out_feat, //
default_types::size_type const out_nrows, //
double *d_num_nonzero, //
default_types::size_type const nchannel, //
gpu_kernel_map<uint32_t, detail::c10_allocator<char>> const &kernel_map, //
bool const use_avg,
detail::c10_allocator<char> &allocator, //
hipsparseHandle_t cushandle, hipStream_t stream);
// Backward
template <typename Dtype, typename Itype, typename ByteAllocator>
void NonzeroAvgPoolingBackwardKernelGPU(
Dtype *d_grad_in_feat, //
default_types::size_type const in_nrows, //
Dtype const *d_grad_out_feat, //
default_types::size_type const out_nrows, //
Dtype const *d_num_nonzero, //
default_types::size_type const nchannel, //
gpu_kernel_map<Itype, ByteAllocator> const &kernel_map, bool const use_avg,
hipStream_t stream) {
// d_grad_in_feat must be all set to 0
size_t sparse_nnzs = kernel_map.in_maps.end() - kernel_map.in_maps.begin();
if (use_avg) {
hipLaunchKernelGGL(( set_gradient_nonzero_avg<Dtype>)
, dim3(GET_BLOCKS(sparse_nnzs * nchannel, CUDA_NUM_THREADS)),
dim3(CUDA_NUM_THREADS), 0, stream,
sparse_nnzs * nchannel, d_grad_out_feat, d_grad_in_feat, nchannel,
d_num_nonzero, kernel_map.in_maps.cdata(),
kernel_map.out_maps.cdata());
} else {
hipLaunchKernelGGL(( set_gradient_nonzero<Dtype>)
, dim3(GET_BLOCKS(sparse_nnzs * nchannel, CUDA_NUM_THREADS)),
dim3(CUDA_NUM_THREADS), 0, stream,
sparse_nnzs * nchannel, d_grad_out_feat, d_grad_in_feat, nchannel,
kernel_map.in_maps.cdata(), kernel_map.out_maps.cdata());
}
CUDA_CHECK(hipDeviceSynchronize());
}
// default_allocator
template void
NonzeroAvgPoolingBackwardKernelGPU<float, uint32_t,
detail::default_allocator<char>>(
float *d_grad_in_feat, //
default_types::size_type const in_nrows, //
float const *d_grad_out_feat, //
default_types::size_type const out_nrows, //
float const *d_num_nonzero, //
default_types::size_type const nchannel, //
gpu_kernel_map<uint32_t, detail::default_allocator<char>> const &kernel_map,
bool const use_avg, hipStream_t stream);
template void
NonzeroAvgPoolingBackwardKernelGPU<double, uint32_t,
detail::default_allocator<char>>(
double *d_grad_in_feat, //
default_types::size_type const in_nrows, //
double const *d_grad_out_feat, //
default_types::size_type const out_nrows, //
double const *d_num_nonzero, //
default_types::size_type const nchannel, //
gpu_kernel_map<uint32_t, detail::default_allocator<char>> const &kernel_map,
bool const use_avg, hipStream_t stream);
// c10_allocator
template void NonzeroAvgPoolingBackwardKernelGPU<float, uint32_t,
detail::c10_allocator<char>>(
float *d_grad_in_feat, //
default_types::size_type const in_nrows, //
float const *d_grad_out_feat, //
default_types::size_type const out_nrows, //
float const *d_num_nonzero, //
default_types::size_type const nchannel, //
gpu_kernel_map<uint32_t, detail::c10_allocator<char>> const &kernel_map,
bool const use_avg, hipStream_t stream);
template void NonzeroAvgPoolingBackwardKernelGPU<double, uint32_t,
detail::c10_allocator<char>>(
double *d_grad_in_feat, //
default_types::size_type const in_nrows, //
double const *d_grad_out_feat, //
default_types::size_type const out_nrows, //
double const *d_num_nonzero, //
default_types::size_type const nchannel, //
gpu_kernel_map<uint32_t, detail::c10_allocator<char>> const &kernel_map,
bool const use_avg, hipStream_t stream);
} // end namespace minkowski
#endif // end GPU_POOLING_AVG
| d41c7e26c0feb65f857c65d76c4a4bc7b3576505.cu | /*
* Copyright (c) 2020 NVIDIA Corporation.
* Copyright (c) 2018-2020 Chris Choy ([email protected]).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#ifndef GPU_POOLING_AVG
#define GPU_POOLING_AVG
#include <cusparse.h>
#include <limits>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <thrust/functional.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include "allocators.cuh"
#include "pooling_avg_kernel.cuh"
#include "utils.hpp"
namespace minkowski {
template <typename Dtype>
__global__ void fill(const int n, Dtype *in_feat, Dtype val) {
CUDA_KERNEL_LOOP(index, n) { in_feat[index] = val; }
}
template <typename Dtype>
__global__ void col2row_major(const int n, const int nrows, const int ncols,
const Dtype *colA, Dtype *rowA) {
int i, j;
CUDA_KERNEL_LOOP(index, n) {
i = index % nrows;
j = index / nrows;
rowA[i * ncols + j] = colA[index];
}
}
template <typename Dtype>
__global__ void col2row_major_with_div(const int n, const int nrows,
const int ncols,
const Dtype *num_nonzero,
const Dtype *colA, Dtype *rowA) {
int i, j;
CUDA_KERNEL_LOOP(index, n) {
i = index % nrows;
j = index / nrows;
if (num_nonzero[i] >= 1) {
rowA[i * ncols + j] = colA[index] / num_nonzero[i];
} else {
rowA[i * ncols + j] = colA[index];
}
}
}
template <typename Itype, typename Dtype>
__global__ void
unique_row2num_nonzero(const int n, Dtype *__restrict__ d_num_nonzero,
const Itype *__restrict__ unique_row_ptr,
const Dtype *__restrict__ reduced_val_ptr) {
CUDA_KERNEL_LOOP(index, n) {
d_num_nonzero[unique_row_ptr[index]] = reduced_val_ptr[index];
}
}
template <typename Dtype, typename Itype>
__global__ void set_gradient(const int n, const Dtype *d_grad_out,
Dtype *d_grad_in, const Itype *out_index,
int nchannel) {
CUDA_KERNEL_LOOP(index, n) {
atomicAdd(&d_grad_in[out_index[index]], d_grad_out[index]);
}
}
template <typename Dtype, typename Itype>
__global__ void
set_gradient_nonzero(const int n, const Dtype *d_grad_out, Dtype *d_grad_in,
int nchannel, const Itype *in_map, const Itype *out_map) {
CUDA_KERNEL_LOOP(index, n) {
int nrow = index / nchannel;
int ch = index % nchannel;
atomicAdd(&d_grad_in[in_map[nrow] * nchannel + ch],
d_grad_out[out_map[nrow] * nchannel + ch]);
}
}
template <typename Dtype, typename Itype>
__global__ void
set_gradient_nonzero_avg(const int n, const Dtype *d_grad_out, Dtype *d_grad_in,
int nchannel, const Dtype *d_num_nonzero,
const Itype *in_map, const Itype *out_map) {
CUDA_KERNEL_LOOP(index, n) {
int nrow = index / nchannel;
int ch = index % nchannel;
int curr_num_nonzero = d_num_nonzero[out_map[nrow]];
if (curr_num_nonzero >= 1)
atomicAdd(&d_grad_in[in_map[nrow] * nchannel + ch],
d_grad_out[out_map[nrow] * nchannel + ch] / curr_num_nonzero);
}
}
template <typename Dtype, typename Itype, typename ByteAllocator>
void NonzeroAvgPoolingForwardKernelGPU(
Dtype const *d_in_feat, //
default_types::size_type const in_nrows, //
Dtype *d_out_feat, //
default_types::size_type const out_nrows, //
Dtype *d_num_nonzero, //
default_types::size_type const nchannel, //
gpu_kernel_map<Itype, ByteAllocator> const &kernel_map, //
bool const use_avg, //
ByteAllocator &allocator, //
cusparseHandle_t cushandle, cudaStream_t stream) {
const Dtype alpha = 1;
const Dtype beta = 0;
static_assert(sizeof(Itype) == sizeof(int),
"cusparse requires int type index");
Dtype *d_ones, *d_coo_val, *d_tmp_out_feat;
constexpr bool is_int32 = sizeof(Itype) == sizeof(int32_t);
constexpr bool is_int64 = sizeof(Itype) == sizeof(int64_t);
constexpr bool is_float32 = std::is_same<Dtype, float>::value;
cudaDataType cuda_data_type = is_float32 ? CUDA_R_32F : CUDA_R_64F;
cusparseSpMMAlg_t mm_alg;
#if defined(CUDART_VERSION) && (CUDART_VERSION < 10010)
ASSERT(false, "spmm sparse-dense requires CUDA 10.1 or greater");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010) && \
(CUDART_VERSION < 11000)
mm_alg = CUSPARSE_COOMM_ALG1;
static_assert(is_int32, "int64 cusparseSpMM requires CUDA 11.1 or greater");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 11000)
mm_alg = CUSPARSE_SPMM_COO_ALG1;
static_assert(is_int32 || is_int64, "Invalid index type");
#endif
/* sparse mm prep */
size_t const sparse_nnzs =
kernel_map.in_maps.end() - kernel_map.in_maps.begin();
static_assert(is_int32, "sort_coo supports int32");
sort_coo_gpu<ByteAllocator>(cushandle, out_nrows, in_nrows, sparse_nnzs,
(int *)kernel_map.out_maps.begin(),
(int *)kernel_map.in_maps.begin(), allocator);
// feature output
d_tmp_out_feat =
(Dtype *)allocator.allocate(nchannel * out_nrows * sizeof(Dtype));
d_coo_val = (Dtype *)allocator.allocate(sparse_nnzs * sizeof(Dtype));
fill<Dtype><<<GET_BLOCKS(sparse_nnzs, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0,
stream>>>(sparse_nnzs, d_coo_val, (Dtype)1.);
if (use_avg) {
d_ones = (Dtype *)allocator.allocate(sparse_nnzs * sizeof(Dtype));
fill<Dtype><<<GET_BLOCKS(sparse_nnzs, CUDA_NUM_THREADS), CUDA_NUM_THREADS,
0, stream>>>(sparse_nnzs, d_ones, (Dtype)1.);
}
#ifdef DEBUG
std::cout << "sparse_nnzs: " << sparse_nnzs << "\n";
Itype *p_scr = (Itype *)std::malloc((sparse_nnzs)*2 * sizeof(Itype));
CUDA_CHECK(cudaMemcpy(p_scr, kernel_map.out_maps.begin(),
sparse_nnzs * sizeof(Itype), cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaMemcpy(p_scr + sparse_nnzs, kernel_map.in_maps.begin(),
sparse_nnzs * sizeof(Itype), cudaMemcpyDeviceToHost));
Itype step = std::max<Itype>(sparse_nnzs / 100, 1);
Itype i = 0;
for (; i < sparse_nnzs;) {
std::cout << i;
std::cout << " out_map: " << p_scr[i]
<< ", in_map: " << p_scr[i + sparse_nnzs] << "\n";
i += step;
}
i -= step;
for (; i < sparse_nnzs; ++i) {
std::cout << i;
std::cout << " out_map: " << p_scr[i]
<< ", in_map: " << p_scr[i + sparse_nnzs] << "\n";
}
std::free(p_scr);
std::cout << "done printing\n";
#endif
Itype *sorted_row_ptr =
(Itype *)allocator.allocate(2 * (sparse_nnzs + 1) * sizeof(Itype));
Itype *sorted_col_ptr = sorted_row_ptr + sparse_nnzs + 1;
CUDA_CHECK(cudaMemcpy(sorted_row_ptr, kernel_map.out_maps.begin(),
sparse_nnzs * sizeof(Itype), cudaMemcpyDeviceToDevice));
CUDA_CHECK(cudaMemcpy(sorted_col_ptr, kernel_map.in_maps.begin(),
sparse_nnzs * sizeof(Itype), cudaMemcpyDeviceToDevice));
thrust::sort_by_key(thrust::device, //
sorted_row_ptr, // key begin
sorted_row_ptr + sparse_nnzs, // key end
sorted_col_ptr);
//   +---------+     +---+
//   |   spm   |     | i |
//   +---------+     | n |
//    in_nrows       |   |
//                   | F |
//                   |   |
//                   +---+
//                   nchannel
size_t dim_i = out_nrows, dim_j = in_nrows, dim_k = nchannel;
cusparseSpMatDescr_t sparse_descr;
cusparseDnMatDescr_t dense_descr;
cusparseDnMatDescr_t result_descr;
CUSPARSE_CHECK(
cusparseCreateCoo(&sparse_descr, //
dim_i, dim_j, sparse_nnzs, //
sorted_row_ptr, // rows
sorted_col_ptr, // cols
d_coo_val, // coo vals
is_int32 ? CUSPARSE_INDEX_32I : CUSPARSE_INDEX_64I,
CUSPARSE_INDEX_BASE_ZERO, cuda_data_type));
CUSPARSE_CHECK(cusparseCreateDnMat(&dense_descr, //
dim_k, dim_j, dim_k, //
(void *)d_in_feat, //
cuda_data_type, CUSPARSE_ORDER_COL));
CUSPARSE_CHECK(cusparseCreateDnMat(&result_descr, //
dim_i, dim_k, dim_i, //
(void *)d_tmp_out_feat, //
cuda_data_type, CUSPARSE_ORDER_COL));
size_t buffer_size = 0;
CUSPARSE_CHECK(cusparseSpMM_bufferSize(
cushandle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_TRANSPOSE,
(void *)&alpha, sparse_descr, dense_descr, (void *)&beta, result_descr,
cuda_data_type, mm_alg, &buffer_size));
// buffer size 0 for CUSPARSE_SPMM_COO_ALG1, CUSPARSE_SPMM_COO_ALG3,
// CUSPARSE_SPMM_COO_ALG4, and CUSPARSE_SPMM_CSR_ALG1
// WARNING: coo sorting must have been handled in the kernel map
// decomposition.
CUSPARSE_CHECK(cusparseSpMM(cushandle, //
CUSPARSE_OPERATION_NON_TRANSPOSE, //
CUSPARSE_OPERATION_TRANSPOSE, //
(void *)&alpha, //
sparse_descr, dense_descr, //
(void *)&beta, result_descr, //
cuda_data_type, mm_alg, &buffer_size));
#ifdef DEBUG
CUDA_CHECK(cudaStreamSynchronize(0));
#endif
LOG_DEBUG("SPMM");
if (use_avg) {
Itype *unique_row_ptr =
(Itype *)allocator.allocate(sparse_nnzs * sizeof(Itype));
Dtype *reduced_val_ptr =
(Dtype *)allocator.allocate(sparse_nnzs * sizeof(Dtype));
// reduce by key
auto end = thrust::reduce_by_key(thrust::device, // policy
sorted_row_ptr, // key begin
sorted_row_ptr + sparse_nnzs, // key end
d_ones, // value begin
unique_row_ptr, // key out begin
reduced_val_ptr // value out begin
);
int num_unique_keys = end.first - unique_row_ptr;
LOG_DEBUG("Num unique keys:", num_unique_keys);
#ifdef DEBUG
Itype *p_unique_row = (Itype *)std::malloc(num_unique_keys * sizeof(Itype));
CUDA_CHECK(cudaMemcpy(p_unique_row, unique_row_ptr,
num_unique_keys * sizeof(Itype),
cudaMemcpyDeviceToHost));
std::cout << "[" << PtrToString(p_unique_row, num_unique_keys) << "]\n";
std::free(p_unique_row);
Dtype *p_reduced_val =
(Dtype *)std::malloc(num_unique_keys * sizeof(Dtype));
CUDA_CHECK(cudaMemcpy(p_reduced_val, reduced_val_ptr,
num_unique_keys * sizeof(Dtype),
cudaMemcpyDeviceToHost));
std::cout << "[" << PtrToString(p_reduced_val, num_unique_keys) << "]\n";
std::free(p_reduced_val);
#endif
// Copy the results to the correct output
unique_row2num_nonzero<Itype, Dtype>
<<<GET_BLOCKS(num_unique_keys, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0,
stream>>>(num_unique_keys, d_num_nonzero, unique_row_ptr,
reduced_val_ptr);
col2row_major_with_div<Dtype>
<<<GET_BLOCKS(out_nrows * nchannel, CUDA_NUM_THREADS), CUDA_NUM_THREADS,
0, stream>>>(out_nrows * nchannel, out_nrows, nchannel,
d_num_nonzero, d_tmp_out_feat, d_out_feat);
#ifdef DEBUG
CUDA_CHECK(cudaStreamSynchronize(0));
#endif
LOG_DEBUG("col2row");
// Delete tmp spaces
allocator.deallocate((char *)unique_row_ptr, sparse_nnzs * sizeof(Itype));
allocator.deallocate((char *)reduced_val_ptr, sparse_nnzs * sizeof(Dtype));
} else {
col2row_major<Dtype><<<GET_BLOCKS(out_nrows * nchannel, CUDA_NUM_THREADS),
CUDA_NUM_THREADS, 0, stream>>>(
out_nrows * nchannel, out_nrows, nchannel, d_tmp_out_feat, d_out_feat);
}
CUSPARSE_CHECK(cusparseDestroySpMat(sparse_descr));
CUSPARSE_CHECK(cusparseDestroyDnMat(dense_descr));
CUSPARSE_CHECK(cusparseDestroyDnMat(result_descr));
allocator.deallocate((char *)d_coo_val, sparse_nnzs * sizeof(Dtype));
allocator.deallocate((char *)d_tmp_out_feat,
nchannel * out_nrows * sizeof(Dtype));
if (use_avg)
allocator.deallocate((char *)d_ones, in_nrows * sizeof(Dtype));
allocator.deallocate((char *)sorted_row_ptr,
2 * (sparse_nnzs + 1) * sizeof(Itype));
CUDA_CHECK(cudaStreamSynchronize(0));
}
// default_allocator
template void
NonzeroAvgPoolingForwardKernelGPU<float, uint32_t,
detail::default_allocator<char>>(
float const *d_in_feat, //
default_types::size_type const in_nrows, //
float *d_out_feat, //
default_types::size_type const out_nrows, //
float *d_num_nonzero, //
default_types::size_type const nchannel, //
gpu_kernel_map<uint32_t, detail::default_allocator<char>> const
&kernel_map, //
bool const use_avg,
detail::default_allocator<char> &allocator, //
cusparseHandle_t cushandle, cudaStream_t stream);
template void
NonzeroAvgPoolingForwardKernelGPU<double, uint32_t,
detail::default_allocator<char>>(
double const *d_in_feat, //
default_types::size_type const in_nrows, //
double *d_out_feat, //
default_types::size_type const out_nrows, //
double *d_num_nonzero, //
default_types::size_type const nchannel, //
gpu_kernel_map<uint32_t, detail::default_allocator<char>> const
&kernel_map, //
bool const use_avg,
detail::default_allocator<char> &allocator, //
cusparseHandle_t cushandle, cudaStream_t stream);
// c10_allocator
template void
NonzeroAvgPoolingForwardKernelGPU<float, uint32_t, detail::c10_allocator<char>>(
float const *d_in_feat, //
default_types::size_type const in_nrows, //
float *d_out_feat, //
default_types::size_type const out_nrows, //
float *d_num_nonzero, //
default_types::size_type const nchannel, //
gpu_kernel_map<uint32_t, detail::c10_allocator<char>> const &kernel_map, //
bool const use_avg,
detail::c10_allocator<char> &allocator, //
cusparseHandle_t cushandle, cudaStream_t stream);
template void NonzeroAvgPoolingForwardKernelGPU<double, uint32_t,
detail::c10_allocator<char>>(
double const *d_in_feat, //
default_types::size_type const in_nrows, //
double *d_out_feat, //
default_types::size_type const out_nrows, //
double *d_num_nonzero, //
default_types::size_type const nchannel, //
gpu_kernel_map<uint32_t, detail::c10_allocator<char>> const &kernel_map, //
bool const use_avg,
detail::c10_allocator<char> &allocator, //
cusparseHandle_t cushandle, cudaStream_t stream);
// Backward
template <typename Dtype, typename Itype, typename ByteAllocator>
void NonzeroAvgPoolingBackwardKernelGPU(
Dtype *d_grad_in_feat, //
default_types::size_type const in_nrows, //
Dtype const *d_grad_out_feat, //
default_types::size_type const out_nrows, //
Dtype const *d_num_nonzero, //
default_types::size_type const nchannel, //
gpu_kernel_map<Itype, ByteAllocator> const &kernel_map, bool const use_avg,
cudaStream_t stream) {
// d_grad_in_feat must be all set to 0
size_t sparse_nnzs = kernel_map.in_maps.end() - kernel_map.in_maps.begin();
if (use_avg) {
set_gradient_nonzero_avg<Dtype>
<<<GET_BLOCKS(sparse_nnzs * nchannel, CUDA_NUM_THREADS),
CUDA_NUM_THREADS, 0, stream>>>(
sparse_nnzs * nchannel, d_grad_out_feat, d_grad_in_feat, nchannel,
d_num_nonzero, kernel_map.in_maps.cdata(),
kernel_map.out_maps.cdata());
} else {
set_gradient_nonzero<Dtype>
<<<GET_BLOCKS(sparse_nnzs * nchannel, CUDA_NUM_THREADS),
CUDA_NUM_THREADS, 0, stream>>>(
sparse_nnzs * nchannel, d_grad_out_feat, d_grad_in_feat, nchannel,
kernel_map.in_maps.cdata(), kernel_map.out_maps.cdata());
}
CUDA_CHECK(cudaDeviceSynchronize());
}
// default_allocator
template void
NonzeroAvgPoolingBackwardKernelGPU<float, uint32_t,
detail::default_allocator<char>>(
float *d_grad_in_feat, //
default_types::size_type const in_nrows, //
float const *d_grad_out_feat, //
default_types::size_type const out_nrows, //
float const *d_num_nonzero, //
default_types::size_type const nchannel, //
gpu_kernel_map<uint32_t, detail::default_allocator<char>> const &kernel_map,
bool const use_avg, cudaStream_t stream);
template void
NonzeroAvgPoolingBackwardKernelGPU<double, uint32_t,
detail::default_allocator<char>>(
double *d_grad_in_feat, //
default_types::size_type const in_nrows, //
double const *d_grad_out_feat, //
default_types::size_type const out_nrows, //
double const *d_num_nonzero, //
default_types::size_type const nchannel, //
gpu_kernel_map<uint32_t, detail::default_allocator<char>> const &kernel_map,
bool const use_avg, cudaStream_t stream);
// c10_allocator
template void NonzeroAvgPoolingBackwardKernelGPU<float, uint32_t,
detail::c10_allocator<char>>(
float *d_grad_in_feat, //
default_types::size_type const in_nrows, //
float const *d_grad_out_feat, //
default_types::size_type const out_nrows, //
float const *d_num_nonzero, //
default_types::size_type const nchannel, //
gpu_kernel_map<uint32_t, detail::c10_allocator<char>> const &kernel_map,
bool const use_avg, cudaStream_t stream);
template void NonzeroAvgPoolingBackwardKernelGPU<double, uint32_t,
detail::c10_allocator<char>>(
double *d_grad_in_feat, //
default_types::size_type const in_nrows, //
double const *d_grad_out_feat, //
default_types::size_type const out_nrows, //
double const *d_num_nonzero, //
default_types::size_type const nchannel, //
gpu_kernel_map<uint32_t, detail::c10_allocator<char>> const &kernel_map,
bool const use_avg, cudaStream_t stream);
} // end namespace minkowski
#endif // end GPU_POOLING_AVG
|
29123146e0aff42b716ed002ac8bdb5376caa07c.hip | // !!! This is a file automatically generated by hipify!!!
#include <THHUNN/THHUNN.h>
#include <THHUNN/common.h>
#include <TH/THHalf.h>
#include <THH/THHNumerics.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <THH/THHApply.cuh>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
#include <thrust/system/hip/execution_policy.h>
#endif
template <typename Dtype, typename Acctype>
struct mse_functor
{
mse_functor() {}
__host__ __device__ Acctype operator()(const Dtype &x, const Dtype &y) const
{
Acctype z = ScalarConvert<Dtype, Acctype>::to(x)-y;
return z*z;
}
};
template <typename Dtype>
struct mse_updateOutput_functor
{
mse_updateOutput_functor() {}
__device__ void operator()(
const Dtype *input,
const Dtype *target,
Dtype *output)
{
Dtype diff = THCNumerics<Dtype>::sub(*input, *target);
*output = THCNumerics<Dtype>::mul(diff, diff);
}
};
template <typename Dtype, typename Acctype>
struct mse_updateGradInput_functor
{
const Acctype norm;
mse_updateGradInput_functor(Acctype norm_)
: norm(norm_)
{}
__host__ __device__ Dtype operator()(const Dtype &x, const Dtype &y) const
{
return ScalarConvert<Acctype, Dtype>::to(norm * (ScalarConvert<Dtype, Acctype>::to(x) - y));
}
};
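// Note on the gradient above (informal): for a squared error term (x - y)^2 the
// derivative with respect to x is 2 * (x - y); the `norm` supplied by the
// generic MSECriterion code is expected to fold in that factor of 2 together
// with any 1/N size-averaging and the incoming gradOutput scale, so this
// functor only has to return norm * (x - y).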
#include <THHUNN/generic/MSECriterion.hip>
#include <THH/THHGenerateFloatTypes.h>
| 29123146e0aff42b716ed002ac8bdb5376caa07c.cu | #include <THCUNN/THCUNN.h>
#include <THCUNN/common.h>
#include <TH/THHalf.h>
#include <THC/THCNumerics.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <THC/THCApply.cuh>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
#include <thrust/system/cuda/execution_policy.h>
#endif
template <typename Dtype, typename Acctype>
struct mse_functor
{
mse_functor() {}
__host__ __device__ Acctype operator()(const Dtype &x, const Dtype &y) const
{
Acctype z = ScalarConvert<Dtype, Acctype>::to(x)-y;
return z*z;
}
};
template <typename Dtype>
struct mse_updateOutput_functor
{
mse_updateOutput_functor() {}
__device__ void operator()(
const Dtype *input,
const Dtype *target,
Dtype *output)
{
Dtype diff = THCNumerics<Dtype>::sub(*input, *target);
*output = THCNumerics<Dtype>::mul(diff, diff);
}
};
template <typename Dtype, typename Acctype>
struct mse_updateGradInput_functor
{
const Acctype norm;
mse_updateGradInput_functor(Acctype norm_)
: norm(norm_)
{}
__host__ __device__ Dtype operator()(const Dtype &x, const Dtype &y) const
{
return ScalarConvert<Acctype, Dtype>::to(norm * (ScalarConvert<Dtype, Acctype>::to(x) - y));
}
};
#include <THCUNN/generic/MSECriterion.cu>
#include <THC/THCGenerateFloatTypes.h>
|
33072ee2939fd294ddc230cbe1ed05e4b159b5f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "training/graph_group_async.h"
#include "kernels/tensor_operators.h"
namespace marian {
void AsyncGraphGroup::setScheduler(Ptr<Scheduler> scheduler) {
scheduler_ = scheduler;
// optimizer has to be registered last to see changes of learning rate
scheduler_->registerTrainingObserver(scheduler_);
for(auto opt : shardOpt_)
scheduler_->registerTrainingObserver(opt);
}
void AsyncGraphGroup::fetchParams(Tensor oldParams,
const std::vector<Tensor>& params,
int device_id) {
// @TODO read guard on parameters
int pos = 0;
std::vector<std::thread> threads;
for(int idx = 0; idx < devices_.size(); idx++) {
threads.emplace_back(std::thread(
[&](int idx, int pos) {
// individual mutex per-shard
std::lock_guard<std::mutex> guard(shardSync_[idx]);
oldParams->subtensor(pos, params[idx]->size())->copyFrom(params[idx]);
},
idx,
pos));
pos += shardSize_;
}
for(auto&& t : threads) {
t.join();
}
}
void AsyncGraphGroup::pushGradients(Tensor newGrads,
size_t batch_words,
int device_id) {
// add instead of copy?
std::vector<std::thread> threads;
int pos = 0;
for(int idx = 0; idx < devices_.size(); idx++) {
threads.emplace_back(std::thread(
[&](int idx, int pos) {
// individual mutex per-shard
std::lock_guard<std::mutex> guard(shardSync_[idx]);
grads_[idx]->copyFrom(newGrads->subtensor(pos, grads_[idx]->size()));
if(scaleLearningRate_) {
shardOpt_[idx]->update(
params_[idx], grads_[idx], batch_words / avgBatchWords_);
} else {
shardOpt_[idx]->update(params_[idx], grads_[idx]);
}
if(movingAvg_)
updateMovingAverage(
paramsAvg_[idx], params_[idx], scheduler_->numberOfBatches());
},
idx,
pos));
pos += shardSize_;
}
for(auto&& t : threads)
t.join();
}
void AsyncGraphGroup::updateMovingAverage(Tensor paramsAvg,
Tensor params,
size_t batches) {
using namespace functional;
float decay
= ::max(mvDecay_, 1.f - (float)(batches + 1) / (float)(batches + 10));
Element(_1 = ((1.f - decay) * _1) + (decay * _2), paramsAvg, params);
}
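// Decay schedule implied above (illustrative numbers): the second term equals
// 9 / (batches + 10), so early updates weight the fresh parameters heavily
// (batches = 0 gives 0.9, batches = 90 gives 0.09) and the weight shrinks until
// it hits the mvDecay_ floor, after which the running average
// paramsAvg = (1 - decay) * paramsAvg + decay * params moves only slowly.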
void AsyncGraphGroup::init(Ptr<data::Batch> batch) {
// initialize the parameters
for(size_t i = 0; i < graphs_.size(); ++i) {
// takes care of thread_local stuff
THREAD_GUARD(builders_[i]->build(graphs_[i], batch);
graphs_[i]->forward(););
}
if(params_.size() == 0) {
int totalSize = graphs_[0]->params()->vals()->size();
shardSize_ = ceil(totalSize / (float)devices_.size());
int pos = 0;
// parameter sharding
for(auto device : devices_) {
int __size__ = min(shardSize_, totalSize);
totalSize -= __size__;
Tensor param;
Ptr<TensorAllocator> allocator = New<TensorAllocator>(device);
allocator->reserveExact(__size__ * sizeof(float));
allocator->allocate(param, {1, __size__});
paramsAlloc_.push_back(allocator);
param->copyFrom(graphs_[0]->params()->vals()->subtensor(pos, __size__));
params_.push_back(param);
pos += __size__;
}
}
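// Sharding example (illustrative only): with totalSize = 10 and 3 devices,
// shardSize_ = ceil(10 / 3.0) = 4, so the loop above allocates shards of
// 4, 4 and 2 elements and copies the matching subtensor of the first graph's
// parameters into each shard.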
if(grads_.size() == 0) {
int totalSize = graphs_[0]->params()->vals()->size();
for(auto device : devices_) {
int __size__ = min(shardSize_, totalSize);
totalSize -= __size__;
Tensor grad_;
Ptr<TensorAllocator> allocator_ = New<TensorAllocator>(device);
allocator_->reserveExact(__size__ * sizeof(float));
allocator_->allocate(grad_, {1, __size__});
gradsAlloc_.push_back(allocator_);
grads_.push_back(grad_);
}
}
if(movingAvg_) {
if(paramsAvg_.size() == 0) {
int totalSize = graphs_[0]->params()->vals()->size();
int i = 0;
for(auto device : devices_) {
int __size__ = min(shardSize_, totalSize);
totalSize -= __size__;
Tensor paramAvg;
Ptr<TensorAllocator> allocator = New<TensorAllocator>(device);
allocator->reserveExact(__size__ * sizeof(float));
allocator->allocate(paramAvg, {1, __size__});
paramAvg->copyFrom(params_[i++]);
paramsAllocAvg_.push_back(allocator);
paramsAvg_.push_back(paramAvg);
}
}
}
}
void AsyncGraphGroup::execute(Ptr<data::Batch> batch) {
if(first_) {
init(batch);
first_ = false;
}
auto task = [this](Ptr<data::Batch> batch) {
static size_t i = 0;
thread_local Ptr<ExpressionGraph> graph;
thread_local Ptr<models::ModelBase> builder;
thread_local size_t t = 0;
thread_local size_t num_seen_words = 0;
thread_local int t_id = 0;
thread_local Tensor accGradients;
thread_local Ptr<TensorAllocator> accAlloc;
if(!graph) {
std::lock_guard<std::mutex> lock(sync_);
t_id = i;
graph = graphs_[i];
builder = builders_[i++];
}
auto costNode = builder->build(graph, batch);
if(t % tau_ == 0) {
fetchParams(graph->params()->vals(), params_, t_id);
}
graph->forward();
float cost = costNode->scalar();
graph->backward();
// Get batch stats
size_t batch_words = batch->words();
Tensor gradients;
if(tau_ > 1) {
if(t == 0) {
accAlloc = New<TensorAllocator>(graph->getDevice());
accAlloc->reserveExact(graph->params()->grads()->memory()->size());
accAlloc->allocate(accGradients, graph->params()->grads()->shape());
accGradients->set(0);
}
using namespace functional;
Element(_1 += _2, accGradients, graph->params()->grads());
gradients = accGradients;
// Keep track of how many words we've calculated the error from
num_seen_words += batch_words;
} else {
gradients = graph->params()->grads();
num_seen_words = batch_words;
}
t++;
if(t % tau_ == 0) {
pushGradients(gradients, num_seen_words, t_id);
// Reset the counter of seen words after gradient update
num_seen_words = 0;
if(tau_ > 1)
gradients->set(0);
}
if(scheduler_) {
std::unique_lock<std::mutex> lock(schedulerMutex_);
// Wait until the thread that wants to do validation is finished.
pool_.wait_for_one(lock);
scheduler_->update(cost, batch);
if(scheduler_->saving()) {
if(movingAvg_)
fetchParams(graph->params()->vals(), paramsAvg_, t_id);
this->save(graph);
}
if(scheduler_->validating()) {
// Wait with validation until all other threads are done with update.
// We want to reuse the graphs for validation, so they need to be in
// a safe state.
pool_.wait_for_others(lock);
if(movingAvg_)
for(auto g : graphs_)
fetchParams(g->params()->vals(), paramsAvg_, t_id);
scheduler_->validate(graphs_);
// Validation is done, tell other threads to continue work.
pool_.notify_others();
}
}
};
pool_.enqueue(task, batch);
}
}
| 33072ee2939fd294ddc230cbe1ed05e4b159b5f7.cu | #include "training/graph_group_async.h"
#include "kernels/tensor_operators.h"
namespace marian {
void AsyncGraphGroup::setScheduler(Ptr<Scheduler> scheduler) {
scheduler_ = scheduler;
// optimizer has to be registered last to see changes of learning rate
scheduler_->registerTrainingObserver(scheduler_);
for(auto opt : shardOpt_)
scheduler_->registerTrainingObserver(opt);
}
void AsyncGraphGroup::fetchParams(Tensor oldParams,
const std::vector<Tensor>& params,
int device_id) {
// @TODO read guard on parameters
int pos = 0;
std::vector<std::thread> threads;
for(int idx = 0; idx < devices_.size(); idx++) {
threads.emplace_back(std::thread(
[&](int idx, int pos) {
// individual mutex per-shard
std::lock_guard<std::mutex> guard(shardSync_[idx]);
oldParams->subtensor(pos, params[idx]->size())->copyFrom(params[idx]);
},
idx,
pos));
pos += shardSize_;
}
for(auto&& t : threads) {
t.join();
}
}
void AsyncGraphGroup::pushGradients(Tensor newGrads,
size_t batch_words,
int device_id) {
// add instead of copy?
std::vector<std::thread> threads;
int pos = 0;
for(int idx = 0; idx < devices_.size(); idx++) {
threads.emplace_back(std::thread(
[&](int idx, int pos) {
// individual mutex per-shard
std::lock_guard<std::mutex> guard(shardSync_[idx]);
grads_[idx]->copyFrom(newGrads->subtensor(pos, grads_[idx]->size()));
if(scaleLearningRate_) {
shardOpt_[idx]->update(
params_[idx], grads_[idx], batch_words / avgBatchWords_);
} else {
shardOpt_[idx]->update(params_[idx], grads_[idx]);
}
if(movingAvg_)
updateMovingAverage(
paramsAvg_[idx], params_[idx], scheduler_->numberOfBatches());
},
idx,
pos));
pos += shardSize_;
}
for(auto&& t : threads)
t.join();
}
void AsyncGraphGroup::updateMovingAverage(Tensor paramsAvg,
Tensor params,
size_t batches) {
using namespace functional;
float decay
= std::max(mvDecay_, 1.f - (float)(batches + 1) / (float)(batches + 10));
Element(_1 = ((1.f - decay) * _1) + (decay * _2), paramsAvg, params);
}
void AsyncGraphGroup::init(Ptr<data::Batch> batch) {
// initialize the parameters
for(size_t i = 0; i < graphs_.size(); ++i) {
// takes care of thread_local stuff
THREAD_GUARD(builders_[i]->build(graphs_[i], batch);
graphs_[i]->forward(););
}
if(params_.size() == 0) {
int totalSize = graphs_[0]->params()->vals()->size();
shardSize_ = ceil(totalSize / (float)devices_.size());
int pos = 0;
// parameter sharding
for(auto device : devices_) {
int __size__ = min(shardSize_, totalSize);
totalSize -= __size__;
Tensor param;
Ptr<TensorAllocator> allocator = New<TensorAllocator>(device);
allocator->reserveExact(__size__ * sizeof(float));
allocator->allocate(param, {1, __size__});
paramsAlloc_.push_back(allocator);
param->copyFrom(graphs_[0]->params()->vals()->subtensor(pos, __size__));
params_.push_back(param);
pos += __size__;
}
}
if(grads_.size() == 0) {
int totalSize = graphs_[0]->params()->vals()->size();
for(auto device : devices_) {
int __size__ = min(shardSize_, totalSize);
totalSize -= __size__;
Tensor grad_;
Ptr<TensorAllocator> allocator_ = New<TensorAllocator>(device);
allocator_->reserveExact(__size__ * sizeof(float));
allocator_->allocate(grad_, {1, __size__});
gradsAlloc_.push_back(allocator_);
grads_.push_back(grad_);
}
}
if(movingAvg_) {
if(paramsAvg_.size() == 0) {
int totalSize = graphs_[0]->params()->vals()->size();
int i = 0;
for(auto device : devices_) {
int __size__ = min(shardSize_, totalSize);
totalSize -= __size__;
Tensor paramAvg;
Ptr<TensorAllocator> allocator = New<TensorAllocator>(device);
allocator->reserveExact(__size__ * sizeof(float));
allocator->allocate(paramAvg, {1, __size__});
paramAvg->copyFrom(params_[i++]);
paramsAllocAvg_.push_back(allocator);
paramsAvg_.push_back(paramAvg);
}
}
}
}
void AsyncGraphGroup::execute(Ptr<data::Batch> batch) {
if(first_) {
init(batch);
first_ = false;
}
auto task = [this](Ptr<data::Batch> batch) {
static size_t i = 0;
thread_local Ptr<ExpressionGraph> graph;
thread_local Ptr<models::ModelBase> builder;
thread_local size_t t = 0;
thread_local size_t num_seen_words = 0;
thread_local int t_id = 0;
thread_local Tensor accGradients;
thread_local Ptr<TensorAllocator> accAlloc;
if(!graph) {
std::lock_guard<std::mutex> lock(sync_);
t_id = i;
graph = graphs_[i];
builder = builders_[i++];
}
auto costNode = builder->build(graph, batch);
if(t % tau_ == 0) {
fetchParams(graph->params()->vals(), params_, t_id);
}
graph->forward();
float cost = costNode->scalar();
graph->backward();
// Get batch stats
size_t batch_words = batch->words();
Tensor gradients;
if(tau_ > 1) {
if(t == 0) {
accAlloc = New<TensorAllocator>(graph->getDevice());
accAlloc->reserveExact(graph->params()->grads()->memory()->size());
accAlloc->allocate(accGradients, graph->params()->grads()->shape());
accGradients->set(0);
}
using namespace functional;
Element(_1 += _2, accGradients, graph->params()->grads());
gradients = accGradients;
// Keep track of how many words we've calculated the error from
num_seen_words += batch_words;
} else {
gradients = graph->params()->grads();
num_seen_words = batch_words;
}
t++;
if(t % tau_ == 0) {
pushGradients(gradients, num_seen_words, t_id);
// Reset the counter of seen words after gradient update
num_seen_words = 0;
if(tau_ > 1)
gradients->set(0);
}
if(scheduler_) {
std::unique_lock<std::mutex> lock(schedulerMutex_);
// Wait until the thread that wants to do validation is finished.
pool_.wait_for_one(lock);
scheduler_->update(cost, batch);
if(scheduler_->saving()) {
if(movingAvg_)
fetchParams(graph->params()->vals(), paramsAvg_, t_id);
this->save(graph);
}
if(scheduler_->validating()) {
// Wait with validation until all other threads are done with update.
// We want to reuse the graphs for validation, so they need to be in
// a safe state.
pool_.wait_for_others(lock);
if(movingAvg_)
for(auto g : graphs_)
fetchParams(g->params()->vals(), paramsAvg_, t_id);
scheduler_->validate(graphs_);
// Validation is done, tell other threads to continue work.
pool_.notify_others();
}
}
};
pool_.enqueue(task, batch);
}
}
|
99d34a054654afbdfad50401b7dcb700b5cc3cc2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions normal z -> s d c
@author Theo Mary
*/
#include "common_magma.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
zlascl2_full(int m, int n, const double* D, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
double mul = D[ind];
A += ind;
if (ind < m) {
for(int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
zlascl2_lower(int m, int n, const double* D, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
double mul = D[ind];
A += ind;
if (ind < m) {
for(int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
zlascl2_upper(int m, int n, const double *D, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
double mul = D[ind];
A += ind;
if (ind < m) {
for(int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/**
Purpose
-------
ZLASCL2 scales the M by N complex matrix A by the real diagonal matrix dD.
TYPE specifies that A may be full, upper triangular, lower triangular.
Arguments
---------
\param[in]
type magma_type_t
TYPE indicates the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
\param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
\param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
\param[in]
dD DOUBLE PRECISION vector, dimension (M)
The diagonal matrix containing the scalar factors. Stored as a vector.
\param[in,out]
dA COMPLEX*16 array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
\param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
\param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlascl2_q(
magma_type_t type, magma_int_t m, magma_int_t n,
const double *dD, magmaDoubleComplex *dA, magma_int_t ldda, magma_int_t *info,
magma_queue_t queue )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -5;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( (m + NB - 1)/NB );
dim3 threads( NB );
if (type == MagmaLower) {
hipLaunchKernelGGL(( zlascl2_lower) , dim3(grid), dim3(threads), 0, queue , m, n, dD, dA, ldda);
}
else if (type == MagmaUpper) {
hipLaunchKernelGGL(( zlascl2_upper) , dim3(grid), dim3(threads), 0, queue , m, n, dD, dA, ldda);
}
else if (type == MagmaFull) {
hipLaunchKernelGGL(( zlascl2_full) , dim3(grid), dim3(threads), 0, queue , m, n, dD, dA, ldda);
}
}
/**
@see magmablas_zlascl2_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlascl2(
magma_type_t type, magma_int_t m, magma_int_t n,
const double *dD, magmaDoubleComplex *dA, magma_int_t ldda, magma_int_t *info )
{
magmablas_zlascl2_q( type, m, n, dD, dA, ldda, info, magma_stream );
}
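// Usage sketch (hypothetical caller, not part of this file): with the scale
// factors dD already resident on the device, each row i of the m x n matrix dA
// is multiplied in place by dD[i]:
//     magma_int_t info;
//     magmablas_zlascl2( MagmaFull, m, n, dD, dA, ldda, &info );
//     // info < 0 signals an illegal argument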
| 99d34a054654afbdfad50401b7dcb700b5cc3cc2.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions normal z -> s d c
@author Theo Mary
*/
#include "common_magma.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
zlascl2_full(int m, int n, const double* D, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
double mul = D[ind];
A += ind;
if (ind < m) {
for(int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
zlascl2_lower(int m, int n, const double* D, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
double mul = D[ind];
A += ind;
if (ind < m) {
for(int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
zlascl2_upper(int m, int n, const double *D, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
double mul = D[ind];
A += ind;
if (ind < m) {
for(int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/**
Purpose
-------
ZLASCL2 scales the M by N complex matrix A by the real diagonal matrix dD.
TYPE specifies that A may be full, upper triangular, lower triangular.
Arguments
---------
\param[in]
type magma_type_t
TYPE indicates the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
\param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
\param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
\param[in]
dD DOUBLE PRECISION vector, dimension (M)
The diagonal matrix containing the scalar factors. Stored as a vector.
\param[in,out]
dA COMPLEX*16 array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
\param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
\param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlascl2_q(
magma_type_t type, magma_int_t m, magma_int_t n,
const double *dD, magmaDoubleComplex *dA, magma_int_t ldda, magma_int_t *info,
magma_queue_t queue )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -5;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( (m + NB - 1)/NB );
dim3 threads( NB );
if (type == MagmaLower) {
zlascl2_lower <<< grid, threads, 0, queue >>> (m, n, dD, dA, ldda);
}
else if (type == MagmaUpper) {
zlascl2_upper <<< grid, threads, 0, queue >>> (m, n, dD, dA, ldda);
}
else if (type == MagmaFull) {
zlascl2_full <<< grid, threads, 0, queue >>> (m, n, dD, dA, ldda);
}
}
/**
@see magmablas_zlascl2_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlascl2(
magma_type_t type, magma_int_t m, magma_int_t n,
const double *dD, magmaDoubleComplex *dA, magma_int_t ldda, magma_int_t *info )
{
magmablas_zlascl2_q( type, m, n, dD, dA, ldda, info, magma_stream );
}
|
8647d95aa9a852a4811c4abf35cb06d767ffeda1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cudaCompress/PackInc.h>
#include <cassert>
#include <cudaCompress/tools/Operator.h>
#include <cudaCompress/cudaUtil.h>
#include <cudaCompress/util.h>
#include <cudaCompress/InstanceImpl.h>
#include <cudaCompress/reduce/reduce_app.cui>
#include <cudaCompress/scan/scan_app.cui>
namespace cudaCompress {
static uint getValueCountMax(const Instance* pInstance)
{
return (pInstance->m_elemCountPerStreamMax + pInstance->m_codingBlockSize - 1) / pInstance->m_codingBlockSize;
}
size_t packIncGetRequiredMemory(const Instance* pInstance)
{
uint valueCountMax = getValueCountMax(pInstance);
size_t size = 0;
// encode and decode: dpValueIncrements
size += getAlignedSize(valueCountMax * sizeof(uint), 128);
// encode: dpReduceOut
size += getAlignedSize(sizeof(uint), 128);
return size;
}
bool packIncInit(Instance* pInstance)
{
return true;
}
bool packIncShutdown(Instance* pInstance)
{
return true;
}
template<typename TOut>
__global__ void computeIncrementsKernel(const uint* __restrict__ pValues, TOut* __restrict__ pValueIncrements, uint valueCount)
{
for(uint index = blockIdx.x * blockDim.x + threadIdx.x; index < valueCount; index += gridDim.x * blockDim.x) {
uint value = pValues[index];
uint previous = (index == 0 ? 0 : pValues[index-1]);
pValueIncrements[index] = TOut(value - previous);
}
}
__global__ void packIncrementsKernel(const uint* __restrict__ pValueIncrements, uint* __restrict__ pPackedValueIncrements, uint valueCount, uint packedWordCount, uint bitsPerValue)
{
for(uint packedIndex = blockIdx.x * blockDim.x + threadIdx.x; packedIndex < packedWordCount; packedIndex += gridDim.x * blockDim.x) {
uint bitIndex = packedIndex * 32;
uint valueIndex = bitIndex / bitsPerValue;
uint leftOverhang = bitIndex % bitsPerValue;
int bitShift = 32 - bitsPerValue + leftOverhang;
uint count = min(valueCount - valueIndex, (32 + leftOverhang + bitsPerValue - 1) / bitsPerValue);
uint result = 0;
for(uint i = 0; i < count; i++) {
uint value = pValueIncrements[valueIndex];
if(bitShift > 0)
value <<= bitShift;
else
value >>= -bitShift;
result |= value;
valueIndex++;
bitShift -= bitsPerValue;
}
pPackedValueIncrements[packedIndex] = result;
}
}
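// Bit-layout example for the packer above (illustrative, bitsPerValue = 10):
// packed word 1 covers stream bits 32..63, so valueIndex = 32 / 10 = 3 and
// leftOverhang = 2; the word receives the low 8 bits of value 3 in its top
// byte (bitShift 24), values 4 and 5 at shifts 14 and 4, and the high 4 bits
// of value 6 (shift -6), i.e. 8 + 10 + 10 + 4 = 32 bits in total.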
__global__ void unpackIncrementsKernel(uint* __restrict__ pValueIncrements, const uint* __restrict__ pPackedValueIncrements, uint valueCount, uint bitsPerValue)
{
uint mask = (1 << bitsPerValue) - 1;
for(uint index = blockIdx.x * blockDim.x + threadIdx.x; index < valueCount; index += gridDim.x * blockDim.x) {
uint bitIndex = index * bitsPerValue;
uint packedIndex = bitIndex / 32;
uint leftBitOffset = bitIndex % 32;
int bitShift = bitsPerValue + leftBitOffset - 32;
uint value = pPackedValueIncrements[packedIndex];
if(bitShift > 0)
value <<= bitShift;
else
value >>= -bitShift;
uint result = value & mask;
if(bitShift > 0) {
bitShift -= 32;
value = pPackedValueIncrements[packedIndex + 1];
value >>= -bitShift;
result |= value & mask;
}
pValueIncrements[index] = result;
}
}
bool packInc(Instance* pInstance, const uint* dpValues, uint* dpPackedValueIncrements, uint valueCount, uint& bitsPerValue)
{
uint* dpValueIncrements = pInstance->getBuffer<uint>(valueCount);
uint* dpReduceOut = pInstance->getBuffer<uint>(1);
// build the increments
uint blockSize = 256;
uint blockCount = min((valueCount + blockSize - 1) / blockSize, 512u);
hipLaunchKernelGGL(( computeIncrementsKernel<uint>), dim3(blockCount), dim3(blockSize), 0, 0, dpValues, dpValueIncrements, valueCount);
cudaCheckMsg("computeIncrementsKernel execution failed");
// find max value, compute packed size
uint valueMax = 0;
reduceArray<uint, OperatorMax<uint> >(dpReduceOut, dpValueIncrements, valueCount, pInstance->m_pReducePlan);
cudaCheckMsg("packInc: error in reduceArray");
cudaSafeCall(hipMemcpy(&valueMax, dpReduceOut, sizeof(uint), hipMemcpyDeviceToHost));
bitsPerValue = getRequiredBits(valueMax);
uint packedWordCount = (valueCount * bitsPerValue + 31) / 32;
// pack
blockSize = 256;
blockCount = min((packedWordCount + blockSize - 1) / blockSize, 512u);
hipLaunchKernelGGL(( packIncrementsKernel), dim3(blockCount), dim3(blockSize), 0, 0, dpValueIncrements, dpPackedValueIncrements, valueCount, packedWordCount, bitsPerValue);
cudaCheckMsg("packIncrementsKernel execution failed");
pInstance->releaseBuffers(2);
return true;
}
bool unpackInc(Instance* pInstance, uint* dpValues, const uint* dpPackedValueIncrements, uint valueCount, uint bitsPerValue)
{
uint* dpValueIncrements = pInstance->getBuffer<uint>(valueCount);
// unpack
uint blockSize = 256;
uint blockCount = min((valueCount + blockSize - 1) / blockSize, 512u);
hipLaunchKernelGGL(( unpackIncrementsKernel), dim3(blockCount), dim3(blockSize), 0, 0, dpValueIncrements, dpPackedValueIncrements, valueCount, bitsPerValue);
cudaCheckMsg("unpackIncrementsKernel execution failed");
// scan to build absolute offsets
scanArray<uint, uint, false>(dpValues, dpValueIncrements, valueCount, pInstance->m_pScanPlan, pInstance->m_stream);
cudaCheckMsg("unpackInc: error in scanArray");
pInstance->releaseBuffer();
return true;
}
bool packInc16(Instance* pInstance, const uint* dpValues, ushort* dpValueIncrements, uint valueCount)
{
if(valueCount == 0)
return true;
uint* dpValueIncrementsTemp = pInstance->getBuffer<uint>(valueCount);
// build the increments
uint blockSize = 256;
uint blockCount = min((valueCount + blockSize - 1) / blockSize, 512u);
// don't write directly into dpValueIncrements because it may alias dpValues...
hipLaunchKernelGGL(( computeIncrementsKernel<ushort>), dim3(blockCount), dim3(blockSize), 0, 0, dpValues, (ushort*)dpValueIncrementsTemp, valueCount);
cudaCheckMsg("computeIncrementsKernel execution failed");
// copy to output array
cudaSafeCall(hipMemcpyAsync(dpValueIncrements, dpValueIncrementsTemp, valueCount * sizeof(ushort), hipMemcpyDeviceToDevice, pInstance->m_stream));
pInstance->releaseBuffer();
return true;
}
bool unpackInc16(Instance* pInstance, uint* dpValues, const ushort* dpValueIncrements, uint valueCount)
{
if(valueCount == 0)
return true;
// scan values to build absolute offsets
// FIXME This is *not* safe if dpValues and dpValueIncrements alias, and the scan needs > 1 block!
// (Would be okay if sizeof(*dpValues) == sizeof(*dpValueIncrements), which is not the case here!)
scanArray<ushort, uint, false>(dpValues, dpValueIncrements, valueCount, pInstance->m_pScanPlan, pInstance->m_stream);
cudaCheckMsg("unpackInc16: error in scanArray");
return true;
}
void packInc16CPU(const uint* pValues, ushort* pValueIncrements, uint valueCount)
{
// written to work correctly even when pValues and pValueIncrements alias
uint prev = pValues[0];
pValueIncrements[0] = prev;
for(uint i = 1; i < valueCount; i++) {
uint value = pValues[i];
pValueIncrements[i] = pValues[i] - prev;
prev = value;
}
}
void unpackInc16CPU(uint* pValues, const ushort* pValueIncrements, uint valueCount)
{
uint total = 0;
for(uint i = 0; i < valueCount; i++) {
total += pValueIncrements[i];
pValues[i] = total;
}
}
}
| 8647d95aa9a852a4811c4abf35cb06d767ffeda1.cu | #include <cudaCompress/PackInc.h>
#include <cassert>
#include <cudaCompress/tools/Operator.h>
#include <cudaCompress/cudaUtil.h>
#include <cudaCompress/util.h>
#include <cudaCompress/InstanceImpl.h>
#include <cudaCompress/reduce/reduce_app.cui>
#include <cudaCompress/scan/scan_app.cui>
namespace cudaCompress {
static uint getValueCountMax(const Instance* pInstance)
{
return (pInstance->m_elemCountPerStreamMax + pInstance->m_codingBlockSize - 1) / pInstance->m_codingBlockSize;
}
size_t packIncGetRequiredMemory(const Instance* pInstance)
{
uint valueCountMax = getValueCountMax(pInstance);
size_t size = 0;
// encode and decode: dpValueIncrements
size += getAlignedSize(valueCountMax * sizeof(uint), 128);
// encode: dpReduceOut
size += getAlignedSize(sizeof(uint), 128);
return size;
}
bool packIncInit(Instance* pInstance)
{
return true;
}
bool packIncShutdown(Instance* pInstance)
{
return true;
}
template<typename TOut>
__global__ void computeIncrementsKernel(const uint* __restrict__ pValues, TOut* __restrict__ pValueIncrements, uint valueCount)
{
for(uint index = blockIdx.x * blockDim.x + threadIdx.x; index < valueCount; index += gridDim.x * blockDim.x) {
uint value = pValues[index];
uint previous = (index == 0 ? 0 : pValues[index-1]);
pValueIncrements[index] = TOut(value - previous);
}
}
__global__ void packIncrementsKernel(const uint* __restrict__ pValueIncrements, uint* __restrict__ pPackedValueIncrements, uint valueCount, uint packedWordCount, uint bitsPerValue)
{
for(uint packedIndex = blockIdx.x * blockDim.x + threadIdx.x; packedIndex < packedWordCount; packedIndex += gridDim.x * blockDim.x) {
uint bitIndex = packedIndex * 32;
uint valueIndex = bitIndex / bitsPerValue;
uint leftOverhang = bitIndex % bitsPerValue;
int bitShift = 32 - bitsPerValue + leftOverhang;
uint count = min(valueCount - valueIndex, (32 + leftOverhang + bitsPerValue - 1) / bitsPerValue);
uint result = 0;
for(uint i = 0; i < count; i++) {
uint value = pValueIncrements[valueIndex];
if(bitShift > 0)
value <<= bitShift;
else
value >>= -bitShift;
result |= value;
valueIndex++;
bitShift -= bitsPerValue;
}
pPackedValueIncrements[packedIndex] = result;
}
}
__global__ void unpackIncrementsKernel(uint* __restrict__ pValueIncrements, const uint* __restrict__ pPackedValueIncrements, uint valueCount, uint bitsPerValue)
{
uint mask = (1 << bitsPerValue) - 1;
for(uint index = blockIdx.x * blockDim.x + threadIdx.x; index < valueCount; index += gridDim.x * blockDim.x) {
uint bitIndex = index * bitsPerValue;
uint packedIndex = bitIndex / 32;
uint leftBitOffset = bitIndex % 32;
int bitShift = bitsPerValue + leftBitOffset - 32;
uint value = pPackedValueIncrements[packedIndex];
if(bitShift > 0)
value <<= bitShift;
else
value >>= -bitShift;
uint result = value & mask;
if(bitShift > 0) {
bitShift -= 32;
value = pPackedValueIncrements[packedIndex + 1];
value >>= -bitShift;
result |= value & mask;
}
pValueIncrements[index] = result;
}
}
bool packInc(Instance* pInstance, const uint* dpValues, uint* dpPackedValueIncrements, uint valueCount, uint& bitsPerValue)
{
uint* dpValueIncrements = pInstance->getBuffer<uint>(valueCount);
uint* dpReduceOut = pInstance->getBuffer<uint>(1);
// build the increments
uint blockSize = 256;
uint blockCount = min((valueCount + blockSize - 1) / blockSize, 512u);
computeIncrementsKernel<uint><<<blockCount, blockSize>>>(dpValues, dpValueIncrements, valueCount);
cudaCheckMsg("computeIncrementsKernel execution failed");
// find max value, compute packed size
uint valueMax = 0;
reduceArray<uint, OperatorMax<uint> >(dpReduceOut, dpValueIncrements, valueCount, pInstance->m_pReducePlan);
cudaCheckMsg("packInc: error in reduceArray");
cudaSafeCall(cudaMemcpy(&valueMax, dpReduceOut, sizeof(uint), cudaMemcpyDeviceToHost));
bitsPerValue = getRequiredBits(valueMax);
uint packedWordCount = (valueCount * bitsPerValue + 31) / 32;
// pack
blockSize = 256;
blockCount = min((packedWordCount + blockSize - 1) / blockSize, 512u);
packIncrementsKernel<<<blockCount, blockSize>>>(dpValueIncrements, dpPackedValueIncrements, valueCount, packedWordCount, bitsPerValue);
cudaCheckMsg("packIncrementsKernel execution failed");
pInstance->releaseBuffers(2);
return true;
}
bool unpackInc(Instance* pInstance, uint* dpValues, const uint* dpPackedValueIncrements, uint valueCount, uint bitsPerValue)
{
uint* dpValueIncrements = pInstance->getBuffer<uint>(valueCount);
// unpack
uint blockSize = 256;
uint blockCount = min((valueCount + blockSize - 1) / blockSize, 512u);
unpackIncrementsKernel<<<blockCount, blockSize>>>(dpValueIncrements, dpPackedValueIncrements, valueCount, bitsPerValue);
cudaCheckMsg("unpackIncrementsKernel execution failed");
// scan to build absolute offsets
scanArray<uint, uint, false>(dpValues, dpValueIncrements, valueCount, pInstance->m_pScanPlan, pInstance->m_stream);
cudaCheckMsg("unpackInc: error in scanArray");
pInstance->releaseBuffer();
return true;
}
bool packInc16(Instance* pInstance, const uint* dpValues, ushort* dpValueIncrements, uint valueCount)
{
if(valueCount == 0)
return true;
uint* dpValueIncrementsTemp = pInstance->getBuffer<uint>(valueCount);
// build the increments
uint blockSize = 256;
uint blockCount = min((valueCount + blockSize - 1) / blockSize, 512u);
// don't write directly into dpValueIncrements because it may alias dpValues...
computeIncrementsKernel<ushort><<<blockCount, blockSize>>>(dpValues, (ushort*)dpValueIncrementsTemp, valueCount);
cudaCheckMsg("computeIncrementsKernel execution failed");
// copy to output array
cudaSafeCall(cudaMemcpyAsync(dpValueIncrements, dpValueIncrementsTemp, valueCount * sizeof(ushort), cudaMemcpyDeviceToDevice, pInstance->m_stream));
pInstance->releaseBuffer();
return true;
}
bool unpackInc16(Instance* pInstance, uint* dpValues, const ushort* dpValueIncrements, uint valueCount)
{
if(valueCount == 0)
return true;
// scan values to build absolute offsets
// FIXME This is *not* safe if dpValues and dpValueIncrements alias, and the scan needs > 1 block!
// (Would be okay if sizeof(*dpValues) == sizeof(*dpValueIncrements), which is not the case here!)
scanArray<ushort, uint, false>(dpValues, dpValueIncrements, valueCount, pInstance->m_pScanPlan, pInstance->m_stream);
cudaCheckMsg("unpackInc16: error in scanArray");
return true;
}
void packInc16CPU(const uint* pValues, ushort* pValueIncrements, uint valueCount)
{
// written to work correctly even when pValues and pValueIncrements alias
uint prev = pValues[0];
pValueIncrements[0] = prev;
for(uint i = 1; i < valueCount; i++) {
uint value = pValues[i];
pValueIncrements[i] = pValues[i] - prev;
prev = value;
}
}
void unpackInc16CPU(uint* pValues, const ushort* pValueIncrements, uint valueCount)
{
uint total = 0;
for(uint i = 0; i < valueCount; i++) {
total += pValueIncrements[i];
pValues[i] = total;
}
}
}
|
94bcfab1194c054e17decfc15abfd71a9dea23c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define BLOCK_SIZE 32
#define DATALEN_PER_BLOCK (BLOCK_SIZE * 2)
#include <stdio.h>
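// Block-wise tree reduction: each thread adds two input elements while loading them
// into shared memory, then the partial sums are halved every iteration until the
// block total sits in shared_data[0], which is written to r[blockIdx.x].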
__global__ void reductionKernel(float *a, float *r)
{
int blocksz = blockDim.x;
int tid = threadIdx.x;
int bid = blockIdx.x;
int i1 = DATALEN_PER_BLOCK * bid + tid;
int i2 = i1 + blocksz;
__shared__ float shared_data[BLOCK_SIZE];
shared_data[tid] = a[i1] + a[i2];
//shared_data[tid + blocksz] = a[i2];
__syncthreads();
for (int i = blocksz / 2; i > 0; i >>= 1) {
if (tid < i) {
shared_data[tid] += shared_data[tid + i];
}
__syncthreads();
}
r[bid] = shared_data[0];
}
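// Host driver: repeatedly launches the block-wise reduction until at most
// DATALEN_PER_BLOCK partial sums remain, then finishes the sum on the CPU. The outer
// k-loop re-runs the whole reduction on a fresh copy of the input (e.g. for timing).
// Note that each pass reduces in place (input and output are the same buffer), which
// is not strictly race-free across blocks.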
float reduction(float *a, size_t len)
{
int data_len_per_block = DATALEN_PER_BLOCK;
float *dr = nullptr;
float *da = nullptr;
    float *r = new float[data_len_per_block];
size_t tlen = len;
hipMalloc(&da, sizeof(float) * len);
for (int k = 0; k < 20; k++) {
len = tlen;
hipMemcpy(da, a, sizeof(float) * len, hipMemcpyHostToDevice);
while (len > data_len_per_block) {
len /= data_len_per_block;
dim3 threads(BLOCK_SIZE);
dim3 grids(len);
hipLaunchKernelGGL(( reductionKernel), dim3(grids), dim3(threads), 0, 0, da, da);
}
}
hipMemcpy(r, da, len * sizeof(float), hipMemcpyDeviceToHost);
if (len > 0) {
for (int i = 1; i < len; i++)
r[0] += r[i];
}
    float rr = r[0];
    hipFree(da);
    hipFree(dr);
    delete[] r;
return rr;
}
int main(int argc, char *argv[])
{
int len = 8192;
if (argc > 1)
len = atoi(argv[1]);
printf("len %d\n", len);
float *a = new float[len];
for (int i = 0; i < len; i++)
a[i] = 1.0;
float r = reduction(a, len);
printf("%.2f\n", r);
return 0;
}
| 94bcfab1194c054e17decfc15abfd71a9dea23c9.cu | #define BLOCK_SIZE 32
#define DATALEN_PER_BLOCK (BLOCK_SIZE * 2)
#include <stdio.h>
__global__ void reductionKernel(float *a, float *r)
{
int blocksz = blockDim.x;
int tid = threadIdx.x;
int bid = blockIdx.x;
int i1 = DATALEN_PER_BLOCK * bid + tid;
int i2 = i1 + blocksz;
__shared__ float shared_data[BLOCK_SIZE];
shared_data[tid] = a[i1] + a[i2];
//shared_data[tid + blocksz] = a[i2];
__syncthreads();
for (int i = blocksz / 2; i > 0; i >>= 1) {
if (tid < i) {
shared_data[tid] += shared_data[tid + i];
}
__syncthreads();
}
r[bid] = shared_data[0];
}
float reduction(float *a, size_t len)
{
int data_len_per_block = DATALEN_PER_BLOCK;
float *dr = nullptr;
float *da = nullptr;
    float *r = new float[data_len_per_block];
size_t tlen = len;
cudaMalloc(&da, sizeof(float) * len);
for (int k = 0; k < 20; k++) {
len = tlen;
cudaMemcpy(da, a, sizeof(float) * len, cudaMemcpyHostToDevice);
while (len > data_len_per_block) {
len /= data_len_per_block;
dim3 threads(BLOCK_SIZE);
dim3 grids(len);
reductionKernel<<<grids, threads>>>(da, da);
}
}
cudaMemcpy(r, da, len * sizeof(float), cudaMemcpyDeviceToHost);
if (len > 0) {
for (int i = 1; i < len; i++)
r[0] += r[i];
}
    float rr = r[0];
    cudaFree(da);
    cudaFree(dr);
    delete[] r;
return rr;
}
int main(int argc, char *argv[])
{
int len = 8192;
if (argc > 1)
len = atoi(argv[1]);
printf("len %d\n", len);
float *a = new float[len];
for (int i = 0; i < len; i++)
a[i] = 1.0;
float r = reduction(a, len);
printf("%.2f\n", r);
return 0;
}
|
a79b43fbc854a2196dc0c458789c4a1929cf3968.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* (C) Copyright 2020, 2021 IBM. All Rights Reserved.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "io_iterator.h"
#include "rpu_pulsed_meta_parameter.h"
#include "rpucuda_mixedprec_device.h"
#include <hipcub/hipcub.hpp>
#include <memory>
namespace RPU {
/******************************************************************************************/
/* MixedPrecRPUDeviceCuda
CUDA implementation of MixedPrecRPUDevice
*/
template <typename T>
MixedPrecRPUDeviceCuda<T>::MixedPrecRPUDeviceCuda(CudaContext *c, int x_size, int d_size)
: MixedPrecRPUDeviceBaseCuda<T>(c, x_size, d_size){};
template <typename T>
MixedPrecRPUDeviceCuda<T>::MixedPrecRPUDeviceCuda(
CudaContext *c, const MixedPrecRPUDevice<T> &rpu_device)
: MixedPrecRPUDeviceCuda<T>(c, rpu_device.getXSize(), rpu_device.getDSize()) {
populateFrom(rpu_device);
};
template <typename T> void MixedPrecRPUDeviceCuda<T>::allocateContainers() {
this->context_->synchronizeDevice();
dev_chi_ = RPU::make_unique<CudaArray<T>>(this->context_, this->size_);
nblocks_batch_max_ = this->context_->getSMCount() *
(this->context_->maxThreadsPerBlock() / this->context_->getNThreads());
}
// copy
template <typename T>
MixedPrecRPUDeviceCuda<T>::MixedPrecRPUDeviceCuda(const MixedPrecRPUDeviceCuda<T> &other)
: MixedPrecRPUDeviceBaseCuda<T>(other) {
allocateContainers();
dev_chi_->assign(*other.dev_chi_);
this->context_->synchronize();
};
template <typename T>
MixedPrecRPUDeviceCuda<T> &
MixedPrecRPUDeviceCuda<T>::operator=(const MixedPrecRPUDeviceCuda<T> &other) {
MixedPrecRPUDeviceCuda<T> tmp(other);
swap(*this, tmp);
return *this;
};
template <typename T>
MixedPrecRPUDeviceCuda<T>::MixedPrecRPUDeviceCuda(MixedPrecRPUDeviceCuda<T> &&other) {
*this = std::move(other);
};
template <typename T>
MixedPrecRPUDeviceCuda<T> &MixedPrecRPUDeviceCuda<T>::operator=(MixedPrecRPUDeviceCuda<T> &&other) {
MixedPrecRPUDeviceBaseCuda<T>::operator=(std::move(other));
dev_chi_ = std::move(other.dev_chi_);
  nblocks_batch_max_ = other.nblocks_batch_max_;
return *this;
}
template <typename T>
void MixedPrecRPUDeviceCuda<T>::populateFrom(const AbstractRPUDevice<T> &rpu_device_in) {
const auto &rpu_device = dynamic_cast<const MixedPrecRPUDevice<T> &>(rpu_device_in);
if (&rpu_device == nullptr) {
RPU_FATAL("populateFrom expects MixedPrecRPUDevice.");
}
MixedPrecRPUDeviceBaseCuda<T>::populateFrom(rpu_device_in); // will set sizes
allocateContainers();
const auto &par = this->getPar();
std::vector<T> v;
v.resize(this->size_);
rpu_device.getChi(v.data());
dev_chi_->assign(v.data()); // both in x-major
this->context_->synchronize();
}
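// Quantizes a batch of vectors onto n_bins levels: each slice is normalized by its
// absolute maximum (taken from the noise-management scale values), rounded onto a
// uniform grid with half_bins steps per sign, clipped, and rescaled back.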
template <typename T>
__global__ void kernelQuantizeBatch(
T *quantized_values,
const T *values,
const T *nm_values,
const int n_bins,
const int size_in,
const int m_batch_in,
const bool trans_in) {
volatile unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
int size = size_in;
int m_batch = m_batch_in;
int total_size = size * m_batch;
bool trans = trans_in;
T half_bins = (T)(n_bins / 2); // floor
T res = (T)1.0 / ((T)half_bins);
T value;
int total_threads = blockDim.x * gridDim.x;
for (int i_stride = 0; i_stride < total_size; i_stride += total_threads) {
int idx = i_stride + tid;
if (idx < total_size) {
value = values[idx];
int sidx = trans ? (idx % m_batch) : (idx / size);
T amax = nm_values[sidx]; // amax from noise management
value = amax > 0.0 ? value / amax : value;
value = RPU_ROUNDFUN(value / res);
value = MIN(MAX(value, -half_bins), half_bins) * amax * res;
quantized_values[idx] = value;
}
}
}
template <typename T>
const T *MixedPrecRPUDeviceCuda<T>::quantize(
T *buffer_values,
const T *values,
RPU::NoiseManager<T> *nm,
int n_bins,
int size,
int m_batch,
bool trans) {
if (n_bins <= 0) {
return values;
}
nm->compute(values, NoiseManagementType::AbsMax, this->io_, m_batch, trans, false);
int nthreads = this->context_->getNThreads();
int nblocks = this->context_->getNBlocks(m_batch * size, nthreads);
int nblocks_batch = MIN(nblocks_batch_max_, nblocks);
hipStream_t s = this->context_->getStream();
hipLaunchKernelGGL(( kernelQuantizeBatch), dim3(nblocks_batch), dim3(nthreads), 0, s,
buffer_values, values, nm->getScaleValues(), n_bins, size, m_batch, trans);
return buffer_values;
}
template <typename T>
void MixedPrecRPUDeviceCuda<T>::doDirectUpdate(
const T *x_input,
const T *d_input,
T *dev_weights,
const T lr,
const int m_batch,
const bool x_trans,
const bool d_trans,
const T beta,
const PulsedUpdateMetaParameter<T> &up,
T *x_buffer,
T *d_buffer) {
if (beta != 1.0f) {
RPU_FATAL("beta not equal 1 is not supported.")
}
this->setUpPar(up);
const auto &par = getPar();
const T *d_val = quantize(
d_buffer, d_input, &*this->noise_manager_d_, par.n_d_bins, this->d_size_, m_batch, d_trans);
// % Quantize x
const T *x_val = quantize(
x_buffer, x_input, &*this->noise_manager_x_, par.n_x_bins, this->x_size_, m_batch, x_trans);
// dev_chi is x-size (row) major !! (to facilitate the readout below)
if (m_batch == 1) {
RPU::math::ger<T>(
this->context_, this->x_size_, this->d_size_, lr, x_val, 1, d_val, 1, dev_chi_->getData(),
this->x_size_);
} else {
RPU::math::gemm<T>(
this->context_, x_trans, !d_trans, this->x_size_, this->d_size_, m_batch, lr, x_val,
x_trans ? m_batch : this->x_size_, d_val, d_trans ? m_batch : this->d_size_,
1.0, // set beta to 1.0. We want to add to Chi
dev_chi_->getData(), this->x_size_);
}
this->doTransfer(dev_weights, 1.0, m_batch);
this->advanceUpdateCounter();
this->computeSparsity(x_buffer, d_buffer, m_batch);
}
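// Transfer step of the mixed-precision update: for every element of the accumulator
// chi, emit the integer number of granularity_ steps it contains and keep only the
// sub-granularity remainder in chi.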
template <typename T>
__global__ void
kernelMixedPrecTransfer(T *transfer_out, T *chi, const int size, const T granularity_) {
volatile unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < size) {
T value = chi[tid];
T dw = truncf(value / granularity_);
transfer_out[tid] = dw;
chi[tid] = value - granularity_ * dw;
}
}
template <typename T>
void MixedPrecRPUDeviceCuda<T>::forwardUpdate(
T *dev_weights,
const T lr,
int i_row_start,
const T *transfer_vec,
const int n_vec,
const bool trans) {
if (!lr) {
return;
}
T t_size = n_vec * this->x_size_;
if ((this->dev_transfer_tmp_ == nullptr) || this->dev_transfer_tmp_->getSize() < t_size) {
this->dev_transfer_tmp_ = RPU::make_unique<CudaArray<T>>(this->context_, t_size);
}
const auto &par = this->getPar();
int nthreads = this->context_->getNThreads();
int nblocks = this->context_->getNBlocks(t_size, nthreads);
hipLaunchKernelGGL(( kernelMixedPrecTransfer<T>), dim3(nblocks), dim3(nthreads), 0, this->context_->getStream(),
this->dev_transfer_tmp_->getData(), dev_chi_->getData() + i_row_start * this->x_size_, t_size,
this->granularity_);
// requires to turn on update_managment / bl managment as well
this->transfer_pwu_->update(
this->dev_transfer_tmp_->getDataConst(), // this is the transfer vector (x_size)
transfer_vec, // this should be d_size, non-trans
dev_weights, &*this->rpucuda_device_, this->up_, this->granularity_, n_vec, trans, false);
}
template <typename T> std::vector<T> MixedPrecRPUDeviceCuda<T>::getHiddenWeights() const {
auto data = MixedPrecRPUDeviceBaseCuda<T>::getHiddenWeights();
int offset = data.size();
data.resize(offset + this->size_);
dev_chi_->copyTo(data.data() + offset);
return data;
}
template class MixedPrecRPUDeviceCuda<float>;
#ifdef RPU_USE_DOUBLE
template class MixedPrecRPUDeviceCuda<double>;
#endif
} // namespace RPU
| a79b43fbc854a2196dc0c458789c4a1929cf3968.cu | /**
* (C) Copyright 2020, 2021 IBM. All Rights Reserved.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "io_iterator.h"
#include "rpu_pulsed_meta_parameter.h"
#include "rpucuda_mixedprec_device.h"
#include <cub/cub.cuh>
#include <memory>
namespace RPU {
/******************************************************************************************/
/* MixedPrecRPUDeviceCuda
CUDA implementation of MixedPrecRPUDevice
*/
template <typename T>
MixedPrecRPUDeviceCuda<T>::MixedPrecRPUDeviceCuda(CudaContext *c, int x_size, int d_size)
: MixedPrecRPUDeviceBaseCuda<T>(c, x_size, d_size){};
template <typename T>
MixedPrecRPUDeviceCuda<T>::MixedPrecRPUDeviceCuda(
CudaContext *c, const MixedPrecRPUDevice<T> &rpu_device)
: MixedPrecRPUDeviceCuda<T>(c, rpu_device.getXSize(), rpu_device.getDSize()) {
populateFrom(rpu_device);
};
template <typename T> void MixedPrecRPUDeviceCuda<T>::allocateContainers() {
this->context_->synchronizeDevice();
dev_chi_ = RPU::make_unique<CudaArray<T>>(this->context_, this->size_);
nblocks_batch_max_ = this->context_->getSMCount() *
(this->context_->maxThreadsPerBlock() / this->context_->getNThreads());
}
// copy
template <typename T>
MixedPrecRPUDeviceCuda<T>::MixedPrecRPUDeviceCuda(const MixedPrecRPUDeviceCuda<T> &other)
: MixedPrecRPUDeviceBaseCuda<T>(other) {
allocateContainers();
dev_chi_->assign(*other.dev_chi_);
this->context_->synchronize();
};
template <typename T>
MixedPrecRPUDeviceCuda<T> &
MixedPrecRPUDeviceCuda<T>::operator=(const MixedPrecRPUDeviceCuda<T> &other) {
MixedPrecRPUDeviceCuda<T> tmp(other);
swap(*this, tmp);
return *this;
};
template <typename T>
MixedPrecRPUDeviceCuda<T>::MixedPrecRPUDeviceCuda(MixedPrecRPUDeviceCuda<T> &&other) {
*this = std::move(other);
};
template <typename T>
MixedPrecRPUDeviceCuda<T> &MixedPrecRPUDeviceCuda<T>::operator=(MixedPrecRPUDeviceCuda<T> &&other) {
MixedPrecRPUDeviceBaseCuda<T>::operator=(std::move(other));
dev_chi_ = std::move(other.dev_chi_);
  nblocks_batch_max_ = other.nblocks_batch_max_;
return *this;
}
template <typename T>
void MixedPrecRPUDeviceCuda<T>::populateFrom(const AbstractRPUDevice<T> &rpu_device_in) {
const auto &rpu_device = dynamic_cast<const MixedPrecRPUDevice<T> &>(rpu_device_in);
if (&rpu_device == nullptr) {
RPU_FATAL("populateFrom expects MixedPrecRPUDevice.");
}
MixedPrecRPUDeviceBaseCuda<T>::populateFrom(rpu_device_in); // will set sizes
allocateContainers();
const auto &par = this->getPar();
std::vector<T> v;
v.resize(this->size_);
rpu_device.getChi(v.data());
dev_chi_->assign(v.data()); // both in x-major
this->context_->synchronize();
}
template <typename T>
__global__ void kernelQuantizeBatch(
T *quantized_values,
const T *values,
const T *nm_values,
const int n_bins,
const int size_in,
const int m_batch_in,
const bool trans_in) {
volatile unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
int size = size_in;
int m_batch = m_batch_in;
int total_size = size * m_batch;
bool trans = trans_in;
T half_bins = (T)(n_bins / 2); // floor
T res = (T)1.0 / ((T)half_bins);
T value;
int total_threads = blockDim.x * gridDim.x;
for (int i_stride = 0; i_stride < total_size; i_stride += total_threads) {
int idx = i_stride + tid;
if (idx < total_size) {
value = values[idx];
int sidx = trans ? (idx % m_batch) : (idx / size);
T amax = nm_values[sidx]; // amax from noise management
value = amax > 0.0 ? value / amax : value;
value = RPU_ROUNDFUN(value / res);
value = MIN(MAX(value, -half_bins), half_bins) * amax * res;
quantized_values[idx] = value;
}
}
}
template <typename T>
const T *MixedPrecRPUDeviceCuda<T>::quantize(
T *buffer_values,
const T *values,
RPU::NoiseManager<T> *nm,
int n_bins,
int size,
int m_batch,
bool trans) {
if (n_bins <= 0) {
return values;
}
nm->compute(values, NoiseManagementType::AbsMax, this->io_, m_batch, trans, false);
int nthreads = this->context_->getNThreads();
int nblocks = this->context_->getNBlocks(m_batch * size, nthreads);
int nblocks_batch = MIN(nblocks_batch_max_, nblocks);
cudaStream_t s = this->context_->getStream();
kernelQuantizeBatch<<<nblocks_batch, nthreads, 0, s>>>(
buffer_values, values, nm->getScaleValues(), n_bins, size, m_batch, trans);
return buffer_values;
}
template <typename T>
void MixedPrecRPUDeviceCuda<T>::doDirectUpdate(
const T *x_input,
const T *d_input,
T *dev_weights,
const T lr,
const int m_batch,
const bool x_trans,
const bool d_trans,
const T beta,
const PulsedUpdateMetaParameter<T> &up,
T *x_buffer,
T *d_buffer) {
if (beta != 1.0f) {
RPU_FATAL("beta not equal 1 is not supported.")
}
this->setUpPar(up);
const auto &par = getPar();
const T *d_val = quantize(
d_buffer, d_input, &*this->noise_manager_d_, par.n_d_bins, this->d_size_, m_batch, d_trans);
// % Quantize x
const T *x_val = quantize(
x_buffer, x_input, &*this->noise_manager_x_, par.n_x_bins, this->x_size_, m_batch, x_trans);
// dev_chi is x-size (row) major !! (to facilitate the readout below)
if (m_batch == 1) {
RPU::math::ger<T>(
this->context_, this->x_size_, this->d_size_, lr, x_val, 1, d_val, 1, dev_chi_->getData(),
this->x_size_);
} else {
RPU::math::gemm<T>(
this->context_, x_trans, !d_trans, this->x_size_, this->d_size_, m_batch, lr, x_val,
x_trans ? m_batch : this->x_size_, d_val, d_trans ? m_batch : this->d_size_,
1.0, // set beta to 1.0. We want to add to Chi
dev_chi_->getData(), this->x_size_);
}
this->doTransfer(dev_weights, 1.0, m_batch);
this->advanceUpdateCounter();
this->computeSparsity(x_buffer, d_buffer, m_batch);
}
template <typename T>
__global__ void
kernelMixedPrecTransfer(T *transfer_out, T *chi, const int size, const T granularity_) {
volatile unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < size) {
T value = chi[tid];
T dw = truncf(value / granularity_);
transfer_out[tid] = dw;
chi[tid] = value - granularity_ * dw;
}
}
template <typename T>
void MixedPrecRPUDeviceCuda<T>::forwardUpdate(
T *dev_weights,
const T lr,
int i_row_start,
const T *transfer_vec,
const int n_vec,
const bool trans) {
if (!lr) {
return;
}
T t_size = n_vec * this->x_size_;
if ((this->dev_transfer_tmp_ == nullptr) || this->dev_transfer_tmp_->getSize() < t_size) {
this->dev_transfer_tmp_ = RPU::make_unique<CudaArray<T>>(this->context_, t_size);
}
const auto &par = this->getPar();
int nthreads = this->context_->getNThreads();
int nblocks = this->context_->getNBlocks(t_size, nthreads);
kernelMixedPrecTransfer<T><<<nblocks, nthreads, 0, this->context_->getStream()>>>(
this->dev_transfer_tmp_->getData(), dev_chi_->getData() + i_row_start * this->x_size_, t_size,
this->granularity_);
// requires to turn on update_managment / bl managment as well
this->transfer_pwu_->update(
this->dev_transfer_tmp_->getDataConst(), // this is the transfer vector (x_size)
transfer_vec, // this should be d_size, non-trans
dev_weights, &*this->rpucuda_device_, this->up_, this->granularity_, n_vec, trans, false);
}
template <typename T> std::vector<T> MixedPrecRPUDeviceCuda<T>::getHiddenWeights() const {
auto data = MixedPrecRPUDeviceBaseCuda<T>::getHiddenWeights();
int offset = data.size();
data.resize(offset + this->size_);
dev_chi_->copyTo(data.data() + offset);
return data;
}
template class MixedPrecRPUDeviceCuda<float>;
#ifdef RPU_USE_DOUBLE
template class MixedPrecRPUDeviceCuda<double>;
#endif
} // namespace RPU
|
678130344e8f5e19b88792a5f3eca7b925cba4c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2019 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <algorithm>
#include <bitset>
#include <string>
#include <sstream>
#include <set>
#include "xgboost/logging.h"
#include "xgboost/span.h"
#include "constraints.cuh"
#include "param.h"
#include "../common/device_helpers.cuh"
namespace xgboost {
size_t FeatureInteractionConstraint::Features() const {
return d_sets_ptr_.size() - 1;
}
void FeatureInteractionConstraint::Configure(
tree::TrainParam const& param, int32_t const n_features) {
has_constraint_ = true;
if (param.interaction_constraints.length() == 0) {
has_constraint_ = false;
return;
}
// --- Parse interaction constraints
std::istringstream iss(param.interaction_constraints);
dmlc::JSONReader reader(&iss);
// Interaction constraints parsed from string parameter. After
// parsing, this looks like {{0, 1, 2}, {2, 3 ,4}}.
std::vector<std::vector<int32_t>> h_feature_constraints;
try {
reader.Read(&h_feature_constraints);
} catch (dmlc::Error const& e) {
LOG(FATAL) << "Failed to parse feature interaction constraint:\n"
<< param.interaction_constraints << "\n"
<< "With error:\n" << e.what();
}
n_sets_ = h_feature_constraints.size();
size_t const n_feat_storage = LBitField64::ComputeStorageSize(n_features);
if (n_feat_storage == 0 && n_features != 0) {
LOG(FATAL) << "Wrong storage size, n_features: " << n_features;
}
// --- Initialize allowed features attached to nodes.
if (param.max_depth == 0 && param.max_leaves == 0) {
LOG(FATAL) << "Max leaves and max depth cannot both be unconstrained for gpu_hist.";
}
int32_t n_nodes {0};
if (param.max_depth != 0) {
n_nodes = ::pow(2, param.max_depth + 1);
} else {
n_nodes = param.max_leaves * 2 - 1;
}
CHECK_NE(n_nodes, 0);
node_constraints_.resize(n_nodes);
node_constraints_storage_.resize(n_nodes);
for (auto& n : node_constraints_storage_) {
n.resize(LBitField64::ComputeStorageSize(n_features));
}
for (size_t i = 0; i < node_constraints_storage_.size(); ++i) {
auto span = dh::ToSpan(node_constraints_storage_[i]);
node_constraints_[i] = LBitField64(span);
}
s_node_constraints_ = common::Span<LBitField64>(node_constraints_.data(),
node_constraints_.size());
// Represent constraints as CSR format, flatten is the value vector,
// ptr is row_ptr vector in CSR.
std::vector<int32_t> h_feature_constraints_flatten;
for (auto const& constraints : h_feature_constraints) {
for (int32_t c : constraints) {
h_feature_constraints_flatten.emplace_back(c);
}
}
std::vector<int32_t> h_feature_constraints_ptr;
size_t n_features_in_constraints = 0;
h_feature_constraints_ptr.emplace_back(n_features_in_constraints);
for (auto const& v : h_feature_constraints) {
n_features_in_constraints += v.size();
h_feature_constraints_ptr.emplace_back(n_features_in_constraints);
}
// Copy the CSR to device.
d_fconstraints_.resize(h_feature_constraints_flatten.size());
thrust::copy(h_feature_constraints_flatten.cbegin(), h_feature_constraints_flatten.cend(),
d_fconstraints_.begin());
s_fconstraints_ = dh::ToSpan(d_fconstraints_);
d_fconstraints_ptr_.resize(h_feature_constraints_ptr.size());
thrust::copy(h_feature_constraints_ptr.cbegin(), h_feature_constraints_ptr.cend(),
d_fconstraints_ptr_.begin());
s_fconstraints_ptr_ = dh::ToSpan(d_fconstraints_ptr_);
// --- Compute interaction sets attached to each feature.
// Use a set to eliminate duplicated entries.
std::vector<std::set<int32_t> > h_features_set(n_features);
int32_t cid = 0;
for (auto const& constraints : h_feature_constraints) {
for (auto const& feat : constraints) {
h_features_set.at(feat).insert(cid);
}
cid++;
}
// Compute device sets.
std::vector<int32_t> h_sets;
int32_t ptr = 0;
std::vector<int32_t> h_sets_ptr {ptr};
for (auto const& feature : h_features_set) {
for (auto constraint_id : feature) {
h_sets.emplace_back(constraint_id);
}
// empty set is well defined here.
ptr += feature.size();
h_sets_ptr.emplace_back(ptr);
}
d_sets_ = h_sets;
d_sets_ptr_ = h_sets_ptr;
s_sets_ = dh::ToSpan(d_sets_);
s_sets_ptr_ = dh::ToSpan(d_sets_ptr_);
d_feature_buffer_storage_.resize(LBitField64::ComputeStorageSize(n_features));
feature_buffer_ = dh::ToSpan(d_feature_buffer_storage_);
// --- Initialize result buffers.
output_buffer_bits_storage_.resize(LBitField64::ComputeStorageSize(n_features));
output_buffer_bits_ = LBitField64(dh::ToSpan(output_buffer_bits_storage_));
input_buffer_bits_storage_.resize(LBitField64::ComputeStorageSize(n_features));
input_buffer_bits_ = LBitField64(dh::ToSpan(input_buffer_bits_storage_));
result_buffer_.resize(n_features);
s_result_buffer_ = dh::ToSpan(result_buffer_);
}
FeatureInteractionConstraint::FeatureInteractionConstraint(
tree::TrainParam const& param, int32_t const n_features) :
has_constraint_{true}, n_sets_{0} {
this->Configure(param, n_features);
}
void FeatureInteractionConstraint::Reset() {
for (auto& node : node_constraints_storage_) {
thrust::fill(node.begin(), node.end(), 0);
}
}
__global__ void ClearBuffersKernel(
LBitField64 result_buffer_output, LBitField64 result_buffer_input) {
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < result_buffer_output.Size()) {
result_buffer_output.Clear(tid);
}
if (tid < result_buffer_input.Size()) {
result_buffer_input.Clear(tid);
}
}
void FeatureInteractionConstraint::ClearBuffers() {
CHECK_EQ(output_buffer_bits_.Size(), input_buffer_bits_.Size());
CHECK_LE(feature_buffer_.Size(), output_buffer_bits_.Size());
uint32_t constexpr kBlockThreads = 256;
auto const n_grids = static_cast<uint32_t>(
common::DivRoundUp(input_buffer_bits_.Size(), kBlockThreads));
dh::LaunchKernel {n_grids, kBlockThreads} (
ClearBuffersKernel,
output_buffer_bits_, input_buffer_bits_);
}
common::Span<int32_t> FeatureInteractionConstraint::QueryNode(int32_t node_id) {
if (!has_constraint_) { return {}; }
CHECK_LT(node_id, s_node_constraints_.size());
ClearBuffers();
thrust::counting_iterator<int32_t> begin(0);
thrust::counting_iterator<int32_t> end(result_buffer_.size());
auto p_result_buffer = result_buffer_.data();
LBitField64 node_constraints = s_node_constraints_[node_id];
thrust::device_ptr<int32_t> const out_end = thrust::copy_if(
thrust::device,
begin, end,
p_result_buffer,
[=]__device__(int32_t pos) {
bool res = node_constraints.Check(pos);
return res;
});
size_t const n_available = std::distance(result_buffer_.data(), out_end);
return {s_result_buffer_.data(), s_result_buffer_.data() + n_available};
}
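// The two kernels below implement Query(): mark every candidate feature in a bit
// field, then intersect it with the node's interaction-constraint bits so that only
// features allowed at this node survive; the surviving indices are compacted on the
// device with thrust::copy_if.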
__global__ void SetInputBufferKernel(common::Span<int32_t> feature_list_input,
LBitField64 result_buffer_input) {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < feature_list_input.size()) {
result_buffer_input.Set(feature_list_input[tid]);
}
}
__global__ void QueryFeatureListKernel(LBitField64 node_constraints,
LBitField64 result_buffer_input,
LBitField64 result_buffer_output) {
result_buffer_output |= node_constraints;
result_buffer_output &= result_buffer_input;
}
common::Span<int32_t> FeatureInteractionConstraint::Query(
common::Span<int32_t> feature_list, int32_t nid) {
if (!has_constraint_ || nid == 0) {
return feature_list;
}
ClearBuffers();
LBitField64 node_constraints = s_node_constraints_[nid];
CHECK_EQ(input_buffer_bits_.Size(), output_buffer_bits_.Size());
uint32_t constexpr kBlockThreads = 256;
auto n_grids = static_cast<uint32_t>(
common::DivRoundUp(output_buffer_bits_.Size(), kBlockThreads));
dh::LaunchKernel {n_grids, kBlockThreads} (
SetInputBufferKernel,
feature_list, input_buffer_bits_);
dh::LaunchKernel {n_grids, kBlockThreads} (
QueryFeatureListKernel,
node_constraints, input_buffer_bits_, output_buffer_bits_);
thrust::counting_iterator<int32_t> begin(0);
thrust::counting_iterator<int32_t> end(result_buffer_.size());
LBitField64 local_result_buffer = output_buffer_bits_;
thrust::device_ptr<int32_t> const out_end = thrust::copy_if(
thrust::device,
begin, end,
result_buffer_.data(),
[=]__device__(int32_t pos) {
bool res = local_result_buffer.Check(pos);
return res;
});
size_t const n_available = std::distance(result_buffer_.data(), out_end);
common::Span<int32_t> result =
{s_result_buffer_.data(), s_result_buffer_.data() + n_available};
return result;
}
// Find interaction sets for each feature, then store all features in
// those sets in a buffer.
__global__ void RestoreFeatureListFromSetsKernel(
LBitField64 feature_buffer,
int32_t fid,
common::Span<int32_t> feature_interactions,
common::Span<int32_t> feature_interactions_ptr, // of size n interaction set + 1
common::Span<int32_t> interactions_list,
common::Span<int32_t> interactions_list_ptr) {
auto const tid_x = threadIdx.x + blockIdx.x * blockDim.x;
auto const tid_y = threadIdx.y + blockIdx.y * blockDim.y;
// painful mapping: fid -> sets related to it -> features related to sets.
auto const beg = interactions_list_ptr[fid];
auto const end = interactions_list_ptr[fid+1];
auto const n_sets = end - beg;
if (tid_x < n_sets) {
auto const set_id_pos = beg + tid_x;
auto const set_id = interactions_list[set_id_pos];
auto const set_beg = feature_interactions_ptr[set_id];
auto const set_end = feature_interactions_ptr[set_id + 1];
auto const feature_pos = set_beg + tid_y;
if (feature_pos < set_end) {
feature_buffer.Set(feature_interactions[feature_pos]);
}
}
}
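// Applied on a split: merge into the node the features gathered into feature_buffer
// (everything interacting with the split feature), propagate the node's allowed set
// to both children, clear the buffer for reuse, and always allow the split feature.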
__global__ void InteractionConstraintSplitKernel(LBitField64 feature,
int32_t feature_id,
LBitField64 node,
LBitField64 left,
LBitField64 right) {
auto tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid > node.Size()) {
return;
}
// enable constraints from feature
node |= feature;
// clear the buffer after use
if (tid < feature.Size()) {
feature.Clear(tid);
}
// enable constraints from parent
left |= node;
right |= node;
if (tid == feature_id) {
// enable the split feature, set all of them at last instead of
// setting it for parent to avoid race.
node.Set(feature_id);
left.Set(feature_id);
right.Set(feature_id);
}
}
void FeatureInteractionConstraint::Split(
int32_t node_id, int32_t feature_id, int32_t left_id, int32_t right_id) {
if (!has_constraint_) { return; }
CHECK_NE(node_id, left_id)
<< " Split node: " << node_id << " and its left child: "
<< left_id << " cannot be the same.";
CHECK_NE(node_id, right_id)
<< " Split node: " << node_id << " and its left child: "
<< right_id << " cannot be the same.";
CHECK_LT(right_id, s_node_constraints_.size());
CHECK_NE(s_node_constraints_.size(), 0);
LBitField64 node = s_node_constraints_[node_id];
LBitField64 left = s_node_constraints_[left_id];
LBitField64 right = s_node_constraints_[right_id];
dim3 const block3(16, 64, 1);
dim3 const grid3(common::DivRoundUp(n_sets_, 16),
common::DivRoundUp(s_fconstraints_.size(), 64));
dh::LaunchKernel {grid3, block3} (
RestoreFeatureListFromSetsKernel,
feature_buffer_, feature_id,
s_fconstraints_, s_fconstraints_ptr_,
s_sets_, s_sets_ptr_);
uint32_t constexpr kBlockThreads = 256;
auto n_grids = static_cast<uint32_t>(common::DivRoundUp(node.Size(), kBlockThreads));
dh::LaunchKernel {n_grids, kBlockThreads} (
InteractionConstraintSplitKernel,
feature_buffer_,
feature_id,
node, left, right);
}
} // namespace xgboost
| 678130344e8f5e19b88792a5f3eca7b925cba4c3.cu | /*!
* Copyright 2019 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <algorithm>
#include <bitset>
#include <string>
#include <sstream>
#include <set>
#include "xgboost/logging.h"
#include "xgboost/span.h"
#include "constraints.cuh"
#include "param.h"
#include "../common/device_helpers.cuh"
namespace xgboost {
size_t FeatureInteractionConstraint::Features() const {
return d_sets_ptr_.size() - 1;
}
void FeatureInteractionConstraint::Configure(
tree::TrainParam const& param, int32_t const n_features) {
has_constraint_ = true;
if (param.interaction_constraints.length() == 0) {
has_constraint_ = false;
return;
}
// --- Parse interaction constraints
std::istringstream iss(param.interaction_constraints);
dmlc::JSONReader reader(&iss);
// Interaction constraints parsed from string parameter. After
// parsing, this looks like {{0, 1, 2}, {2, 3 ,4}}.
std::vector<std::vector<int32_t>> h_feature_constraints;
try {
reader.Read(&h_feature_constraints);
} catch (dmlc::Error const& e) {
LOG(FATAL) << "Failed to parse feature interaction constraint:\n"
<< param.interaction_constraints << "\n"
<< "With error:\n" << e.what();
}
n_sets_ = h_feature_constraints.size();
size_t const n_feat_storage = LBitField64::ComputeStorageSize(n_features);
if (n_feat_storage == 0 && n_features != 0) {
LOG(FATAL) << "Wrong storage size, n_features: " << n_features;
}
// --- Initialize allowed features attached to nodes.
if (param.max_depth == 0 && param.max_leaves == 0) {
LOG(FATAL) << "Max leaves and max depth cannot both be unconstrained for gpu_hist.";
}
int32_t n_nodes {0};
if (param.max_depth != 0) {
n_nodes = std::pow(2, param.max_depth + 1);
} else {
n_nodes = param.max_leaves * 2 - 1;
}
CHECK_NE(n_nodes, 0);
node_constraints_.resize(n_nodes);
node_constraints_storage_.resize(n_nodes);
for (auto& n : node_constraints_storage_) {
n.resize(LBitField64::ComputeStorageSize(n_features));
}
for (size_t i = 0; i < node_constraints_storage_.size(); ++i) {
auto span = dh::ToSpan(node_constraints_storage_[i]);
node_constraints_[i] = LBitField64(span);
}
s_node_constraints_ = common::Span<LBitField64>(node_constraints_.data(),
node_constraints_.size());
// Represent constraints as CSR format, flatten is the value vector,
// ptr is row_ptr vector in CSR.
std::vector<int32_t> h_feature_constraints_flatten;
for (auto const& constraints : h_feature_constraints) {
for (int32_t c : constraints) {
h_feature_constraints_flatten.emplace_back(c);
}
}
std::vector<int32_t> h_feature_constraints_ptr;
size_t n_features_in_constraints = 0;
h_feature_constraints_ptr.emplace_back(n_features_in_constraints);
for (auto const& v : h_feature_constraints) {
n_features_in_constraints += v.size();
h_feature_constraints_ptr.emplace_back(n_features_in_constraints);
}
// Copy the CSR to device.
d_fconstraints_.resize(h_feature_constraints_flatten.size());
thrust::copy(h_feature_constraints_flatten.cbegin(), h_feature_constraints_flatten.cend(),
d_fconstraints_.begin());
s_fconstraints_ = dh::ToSpan(d_fconstraints_);
d_fconstraints_ptr_.resize(h_feature_constraints_ptr.size());
thrust::copy(h_feature_constraints_ptr.cbegin(), h_feature_constraints_ptr.cend(),
d_fconstraints_ptr_.begin());
s_fconstraints_ptr_ = dh::ToSpan(d_fconstraints_ptr_);
// --- Compute interaction sets attached to each feature.
// Use a set to eliminate duplicated entries.
std::vector<std::set<int32_t> > h_features_set(n_features);
int32_t cid = 0;
for (auto const& constraints : h_feature_constraints) {
for (auto const& feat : constraints) {
h_features_set.at(feat).insert(cid);
}
cid++;
}
// Compute device sets.
std::vector<int32_t> h_sets;
int32_t ptr = 0;
std::vector<int32_t> h_sets_ptr {ptr};
for (auto const& feature : h_features_set) {
for (auto constraint_id : feature) {
h_sets.emplace_back(constraint_id);
}
// empty set is well defined here.
ptr += feature.size();
h_sets_ptr.emplace_back(ptr);
}
d_sets_ = h_sets;
d_sets_ptr_ = h_sets_ptr;
s_sets_ = dh::ToSpan(d_sets_);
s_sets_ptr_ = dh::ToSpan(d_sets_ptr_);
d_feature_buffer_storage_.resize(LBitField64::ComputeStorageSize(n_features));
feature_buffer_ = dh::ToSpan(d_feature_buffer_storage_);
// --- Initialize result buffers.
output_buffer_bits_storage_.resize(LBitField64::ComputeStorageSize(n_features));
output_buffer_bits_ = LBitField64(dh::ToSpan(output_buffer_bits_storage_));
input_buffer_bits_storage_.resize(LBitField64::ComputeStorageSize(n_features));
input_buffer_bits_ = LBitField64(dh::ToSpan(input_buffer_bits_storage_));
result_buffer_.resize(n_features);
s_result_buffer_ = dh::ToSpan(result_buffer_);
}
FeatureInteractionConstraint::FeatureInteractionConstraint(
tree::TrainParam const& param, int32_t const n_features) :
has_constraint_{true}, n_sets_{0} {
this->Configure(param, n_features);
}
void FeatureInteractionConstraint::Reset() {
for (auto& node : node_constraints_storage_) {
thrust::fill(node.begin(), node.end(), 0);
}
}
__global__ void ClearBuffersKernel(
LBitField64 result_buffer_output, LBitField64 result_buffer_input) {
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < result_buffer_output.Size()) {
result_buffer_output.Clear(tid);
}
if (tid < result_buffer_input.Size()) {
result_buffer_input.Clear(tid);
}
}
void FeatureInteractionConstraint::ClearBuffers() {
CHECK_EQ(output_buffer_bits_.Size(), input_buffer_bits_.Size());
CHECK_LE(feature_buffer_.Size(), output_buffer_bits_.Size());
uint32_t constexpr kBlockThreads = 256;
auto const n_grids = static_cast<uint32_t>(
common::DivRoundUp(input_buffer_bits_.Size(), kBlockThreads));
dh::LaunchKernel {n_grids, kBlockThreads} (
ClearBuffersKernel,
output_buffer_bits_, input_buffer_bits_);
}
common::Span<int32_t> FeatureInteractionConstraint::QueryNode(int32_t node_id) {
if (!has_constraint_) { return {}; }
CHECK_LT(node_id, s_node_constraints_.size());
ClearBuffers();
thrust::counting_iterator<int32_t> begin(0);
thrust::counting_iterator<int32_t> end(result_buffer_.size());
auto p_result_buffer = result_buffer_.data();
LBitField64 node_constraints = s_node_constraints_[node_id];
thrust::device_ptr<int32_t> const out_end = thrust::copy_if(
thrust::device,
begin, end,
p_result_buffer,
[=]__device__(int32_t pos) {
bool res = node_constraints.Check(pos);
return res;
});
size_t const n_available = std::distance(result_buffer_.data(), out_end);
return {s_result_buffer_.data(), s_result_buffer_.data() + n_available};
}
__global__ void SetInputBufferKernel(common::Span<int32_t> feature_list_input,
LBitField64 result_buffer_input) {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < feature_list_input.size()) {
result_buffer_input.Set(feature_list_input[tid]);
}
}
__global__ void QueryFeatureListKernel(LBitField64 node_constraints,
LBitField64 result_buffer_input,
LBitField64 result_buffer_output) {
result_buffer_output |= node_constraints;
result_buffer_output &= result_buffer_input;
}
common::Span<int32_t> FeatureInteractionConstraint::Query(
common::Span<int32_t> feature_list, int32_t nid) {
if (!has_constraint_ || nid == 0) {
return feature_list;
}
ClearBuffers();
LBitField64 node_constraints = s_node_constraints_[nid];
CHECK_EQ(input_buffer_bits_.Size(), output_buffer_bits_.Size());
uint32_t constexpr kBlockThreads = 256;
auto n_grids = static_cast<uint32_t>(
common::DivRoundUp(output_buffer_bits_.Size(), kBlockThreads));
dh::LaunchKernel {n_grids, kBlockThreads} (
SetInputBufferKernel,
feature_list, input_buffer_bits_);
dh::LaunchKernel {n_grids, kBlockThreads} (
QueryFeatureListKernel,
node_constraints, input_buffer_bits_, output_buffer_bits_);
thrust::counting_iterator<int32_t> begin(0);
thrust::counting_iterator<int32_t> end(result_buffer_.size());
LBitField64 local_result_buffer = output_buffer_bits_;
thrust::device_ptr<int32_t> const out_end = thrust::copy_if(
thrust::device,
begin, end,
result_buffer_.data(),
[=]__device__(int32_t pos) {
bool res = local_result_buffer.Check(pos);
return res;
});
size_t const n_available = std::distance(result_buffer_.data(), out_end);
common::Span<int32_t> result =
{s_result_buffer_.data(), s_result_buffer_.data() + n_available};
return result;
}
// Find interaction sets for each feature, then store all features in
// those sets in a buffer.
__global__ void RestoreFeatureListFromSetsKernel(
LBitField64 feature_buffer,
int32_t fid,
common::Span<int32_t> feature_interactions,
common::Span<int32_t> feature_interactions_ptr, // of size n interaction set + 1
common::Span<int32_t> interactions_list,
common::Span<int32_t> interactions_list_ptr) {
auto const tid_x = threadIdx.x + blockIdx.x * blockDim.x;
auto const tid_y = threadIdx.y + blockIdx.y * blockDim.y;
// painful mapping: fid -> sets related to it -> features related to sets.
auto const beg = interactions_list_ptr[fid];
auto const end = interactions_list_ptr[fid+1];
auto const n_sets = end - beg;
if (tid_x < n_sets) {
auto const set_id_pos = beg + tid_x;
auto const set_id = interactions_list[set_id_pos];
auto const set_beg = feature_interactions_ptr[set_id];
auto const set_end = feature_interactions_ptr[set_id + 1];
auto const feature_pos = set_beg + tid_y;
if (feature_pos < set_end) {
feature_buffer.Set(feature_interactions[feature_pos]);
}
}
}
__global__ void InteractionConstraintSplitKernel(LBitField64 feature,
int32_t feature_id,
LBitField64 node,
LBitField64 left,
LBitField64 right) {
auto tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid > node.Size()) {
return;
}
// enable constraints from feature
node |= feature;
// clear the buffer after use
if (tid < feature.Size()) {
feature.Clear(tid);
}
// enable constraints from parent
left |= node;
right |= node;
if (tid == feature_id) {
// enable the split feature, set all of them at last instead of
// setting it for parent to avoid race.
node.Set(feature_id);
left.Set(feature_id);
right.Set(feature_id);
}
}
void FeatureInteractionConstraint::Split(
int32_t node_id, int32_t feature_id, int32_t left_id, int32_t right_id) {
if (!has_constraint_) { return; }
CHECK_NE(node_id, left_id)
<< " Split node: " << node_id << " and its left child: "
<< left_id << " cannot be the same.";
CHECK_NE(node_id, right_id)
<< " Split node: " << node_id << " and its left child: "
<< right_id << " cannot be the same.";
CHECK_LT(right_id, s_node_constraints_.size());
CHECK_NE(s_node_constraints_.size(), 0);
LBitField64 node = s_node_constraints_[node_id];
LBitField64 left = s_node_constraints_[left_id];
LBitField64 right = s_node_constraints_[right_id];
dim3 const block3(16, 64, 1);
dim3 const grid3(common::DivRoundUp(n_sets_, 16),
common::DivRoundUp(s_fconstraints_.size(), 64));
dh::LaunchKernel {grid3, block3} (
RestoreFeatureListFromSetsKernel,
feature_buffer_, feature_id,
s_fconstraints_, s_fconstraints_ptr_,
s_sets_, s_sets_ptr_);
uint32_t constexpr kBlockThreads = 256;
auto n_grids = static_cast<uint32_t>(common::DivRoundUp(node.Size(), kBlockThreads));
dh::LaunchKernel {n_grids, kBlockThreads} (
InteractionConstraintSplitKernel,
feature_buffer_,
feature_id,
node, left, right);
}
} // namespace xgboost
|
71186b686907c68924e8268c4e96881f39f87d42.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <functional>
#include <memory>
#include <vector>
#include "lite/backends/cuda/math/gemm.h"
#include "lite/core/op_registry.h"
#include "lite/core/target_wrapper.h"
#include "lite/core/tensor.h"
#include "lite/kernels/cuda/var_conv_2d_compute.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
inline int ConvOutputSize(int input_size,
int filter_size,
int dilation,
int pad_left,
int pad_right,
int stride) {
const int dkernel = dilation * (filter_size - 1) + 1;
int output_size =
(input_size + (pad_left + pad_right) - dkernel) / stride + 1;
return output_size;
}
// Eliminate the effects of pad, support batch > 1.
template <typename dtype>
__global__ void eliminate_pad_effect(dtype* src,
const int64_t* offset,
const int num_batch,
const int batch_stride,
const int num_channel,
const int channel_stride,
const int num_height,
const int height_stride,
const int num_width,
const int width_stride,
const int count) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int thread_num = blockDim.x * gridDim.x;
for (tid = threadIdx.x + blockIdx.x * blockDim.x; tid < count;
tid += thread_num) {
int batch_id = tid / batch_stride;
int width_id = tid % num_width;
int cur_len = offset[batch_id + 1] - offset[batch_id];
if (width_id >= cur_len) {
src[tid] = 0.;
}
}
}
void VarConv2DCompute::PrepareForRun() {
auto& context = this->ctx_->template As<CUDAContext>();
auto stream = context.exec_stream();
auto& param = this->Param<param_t>();
conv_param_.x = const_cast<lite::Tensor*>(param.X);
conv_param_.var_length = true;
conv_param_.paddings.reset(new std::vector<int>);
conv_param_.paddings->push_back(static_cast<int>(param.kernel_h / 2));
conv_param_.paddings->push_back(static_cast<int>(param.kernel_h / 2));
conv_param_.paddings->push_back(static_cast<int>(param.kernel_w / 2));
conv_param_.paddings->push_back(static_cast<int>(param.kernel_w / 2));
conv_param_.dilations.reset(new std::vector<int>);
conv_param_.dilations->push_back(1);
conv_param_.dilations->push_back(1);
conv_param_.strides[0] = param.stride_h;
conv_param_.strides[1] = param.stride_w;
conv_param_.filter = const_cast<lite::Tensor*>(param.W);
conv_param_.filter->Resize({param.output_channel,
param.input_channel,
param.kernel_h,
param.kernel_w});
conv_param_.output = param.Out;
std::vector<int64_t> output_shape(
{conv_param_.x->dims()[0], param.output_channel});
for (size_t i = 0; i < conv_param_.strides.size(); ++i) {
output_shape.push_back(
ConvOutputSize(conv_param_.x->dims()[i + 2],
conv_param_.filter->dims()[i + 2],
(*conv_param_.dilations.get())[i],
(*conv_param_.paddings.get())[i * 2],
(*conv_param_.paddings.get())[i * 2 + 1],
conv_param_.strides[i]));
}
if (param.fuse_relu) {
conv_param_.activation_param.has_active = true;
conv_param_.activation_param.active_type = lite_api::ActivationType::kRelu;
}
conv_param_.output->Resize({output_shape});
conv_impl_.reset(new lite::cuda::math::CudnnConv2D<PRECISION(kFloat)>);
conv_impl_->init(conv_param_, &context);
}
void VarConv2DCompute::Run() {
auto& context = this->ctx_->template As<CUDAContext>();
auto stream = context.exec_stream();
auto& param = this->Param<param_t>();
param.Out->set_lod(param.X->lod());
std::vector<int64_t> output_shape(
{conv_param_.x->dims()[0], param.output_channel});
for (size_t i = 0; i < conv_param_.strides.size(); ++i) {
output_shape.push_back(
ConvOutputSize(conv_param_.x->dims()[i + 2],
conv_param_.filter->dims()[i + 2],
(*conv_param_.dilations.get())[i],
(*conv_param_.paddings.get())[i * 2],
(*conv_param_.paddings.get())[i * 2 + 1],
conv_param_.strides[i]));
}
conv_param_.output->Resize({output_shape});
conv_impl_->create(conv_param_, &context);
conv_impl_->run(conv_param_);
// Avoid situations where cascading conv does not support multiple batch
// calculations
float* out_data = param.Out->mutable_data<float>();
const int batch_num = output_shape[1] * output_shape[2] * output_shape[3];
std::vector<int64_t> lod(param.X->lod()[0].size(), 0);
for (size_t i = 0; i < param.X->lod()[0].size(); ++i) {
lod[i] = param.X->lod()[0][i];
}
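  // Precompute linear strides and the launch configuration, then zero every output
  // column beyond the true (variable) sequence length given by the LoD offsets, so
  // that convolution padding does not contaminate downstream computation.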
int count = std::accumulate(
output_shape.begin(), output_shape.end(), 1, std::multiplies<int>());
int width_stride = 1;
int height_stride = output_shape[3];
int channel_stride = output_shape[2] * output_shape[3];
int batch_stride = output_shape[1] * output_shape[2] * output_shape[3];
int threads = 512;
int blocks = (count + threads - 1) / threads;
offset_.Resize({static_cast<int64_t>(lod.size())});
int64_t* d_offset = offset_.mutable_data<int64_t>(TARGET(kCUDA));
TargetWrapperCuda::MemcpyAsync(d_offset,
lod.data(),
sizeof(int64_t) * lod.size(),
IoDirection::HtoD,
stream);
hipLaunchKernelGGL(( eliminate_pad_effect<float>), dim3(blocks), dim3(threads), 0, stream, out_data,
d_offset,
output_shape[0],
batch_stride,
output_shape[1],
channel_stride,
output_shape[2],
height_stride,
output_shape[3],
width_stride,
count);
hipError_t error = hipGetLastError();
if (error != hipSuccess) LOG(ERROR) << hipGetErrorString(error);
}
} // namespace cuda
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_LITE_KERNEL(var_conv_2d,
kCUDA,
kFloat,
kNCHW,
paddle::lite::kernels::cuda::VarConv2DCompute,
def)
.BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindInput("W", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("Col", {LiteType::GetTensorTy(TARGET(kCUDA))})
.Finalize();
| 71186b686907c68924e8268c4e96881f39f87d42.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <functional>
#include <memory>
#include <vector>
#include "lite/backends/cuda/math/gemm.h"
#include "lite/core/op_registry.h"
#include "lite/core/target_wrapper.h"
#include "lite/core/tensor.h"
#include "lite/kernels/cuda/var_conv_2d_compute.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
inline int ConvOutputSize(int input_size,
int filter_size,
int dilation,
int pad_left,
int pad_right,
int stride) {
const int dkernel = dilation * (filter_size - 1) + 1;
int output_size =
(input_size + (pad_left + pad_right) - dkernel) / stride + 1;
return output_size;
}
// Eliminate the effects of pad, support batch > 1.
template <typename dtype>
__global__ void eliminate_pad_effect(dtype* src,
const int64_t* offset,
const int num_batch,
const int batch_stride,
const int num_channel,
const int channel_stride,
const int num_height,
const int height_stride,
const int num_width,
const int width_stride,
const int count) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int thread_num = blockDim.x * gridDim.x;
for (tid = threadIdx.x + blockIdx.x * blockDim.x; tid < count;
tid += thread_num) {
int batch_id = tid / batch_stride;
int width_id = tid % num_width;
int cur_len = offset[batch_id + 1] - offset[batch_id];
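// Width positions at or beyond this batch's valid sequence length are padding; zero them.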
if (width_id >= cur_len) {
src[tid] = 0.;
}
}
}
void VarConv2DCompute::PrepareForRun() {
auto& context = this->ctx_->template As<CUDAContext>();
auto stream = context.exec_stream();
auto& param = this->Param<param_t>();
conv_param_.x = const_cast<lite::Tensor*>(param.X);
conv_param_.var_length = true;
conv_param_.paddings.reset(new std::vector<int>);
conv_param_.paddings->push_back(static_cast<int>(param.kernel_h / 2));
conv_param_.paddings->push_back(static_cast<int>(param.kernel_h / 2));
conv_param_.paddings->push_back(static_cast<int>(param.kernel_w / 2));
conv_param_.paddings->push_back(static_cast<int>(param.kernel_w / 2));
conv_param_.dilations.reset(new std::vector<int>);
conv_param_.dilations->push_back(1);
conv_param_.dilations->push_back(1);
conv_param_.strides[0] = param.stride_h;
conv_param_.strides[1] = param.stride_w;
conv_param_.filter = const_cast<lite::Tensor*>(param.W);
conv_param_.filter->Resize({param.output_channel,
param.input_channel,
param.kernel_h,
param.kernel_w});
conv_param_.output = param.Out;
std::vector<int64_t> output_shape(
{conv_param_.x->dims()[0], param.output_channel});
for (size_t i = 0; i < conv_param_.strides.size(); ++i) {
output_shape.push_back(
ConvOutputSize(conv_param_.x->dims()[i + 2],
conv_param_.filter->dims()[i + 2],
(*conv_param_.dilations.get())[i],
(*conv_param_.paddings.get())[i * 2],
(*conv_param_.paddings.get())[i * 2 + 1],
conv_param_.strides[i]));
}
if (param.fuse_relu) {
conv_param_.activation_param.has_active = true;
conv_param_.activation_param.active_type = lite_api::ActivationType::kRelu;
}
conv_param_.output->Resize({output_shape});
conv_impl_.reset(new lite::cuda::math::CudnnConv2D<PRECISION(kFloat)>);
conv_impl_->init(conv_param_, &context);
}
void VarConv2DCompute::Run() {
auto& context = this->ctx_->template As<CUDAContext>();
auto stream = context.exec_stream();
auto& param = this->Param<param_t>();
param.Out->set_lod(param.X->lod());
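// Input dims can differ between runs for variable-length inputs, so the output
// shape is recomputed and the conv is re-created on every run.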
std::vector<int64_t> output_shape(
{conv_param_.x->dims()[0], param.output_channel});
for (size_t i = 0; i < conv_param_.strides.size(); ++i) {
output_shape.push_back(
ConvOutputSize(conv_param_.x->dims()[i + 2],
conv_param_.filter->dims()[i + 2],
(*conv_param_.dilations.get())[i],
(*conv_param_.paddings.get())[i * 2],
(*conv_param_.paddings.get())[i * 2 + 1],
conv_param_.strides[i]));
}
conv_param_.output->Resize({output_shape});
conv_impl_->create(conv_param_, &context);
conv_impl_->run(conv_param_);
// Zero out the padded output positions so that a cascaded conv still produces
// correct results for multi-batch, variable-length inputs.
float* out_data = param.Out->mutable_data<float>();
const int batch_num = output_shape[1] * output_shape[2] * output_shape[3];
std::vector<int64_t> lod(param.X->lod()[0].size(), 0);
for (size_t i = 0; i < param.X->lod()[0].size(); ++i) {
lod[i] = param.X->lod()[0][i];
}
int count = std::accumulate(
output_shape.begin(), output_shape.end(), 1, std::multiplies<int>());
int width_stride = 1;
int height_stride = output_shape[3];
int channel_stride = output_shape[2] * output_shape[3];
int batch_stride = output_shape[1] * output_shape[2] * output_shape[3];
int threads = 512;
int blocks = (count + threads - 1) / threads;
offset_.Resize({static_cast<int64_t>(lod.size())});
int64_t* d_offset = offset_.mutable_data<int64_t>(TARGET(kCUDA));
TargetWrapperCuda::MemcpyAsync(d_offset,
lod.data(),
sizeof(int64_t) * lod.size(),
IoDirection::HtoD,
stream);
eliminate_pad_effect<float><<<blocks, threads, 0, stream>>>(out_data,
d_offset,
output_shape[0],
batch_stride,
output_shape[1],
channel_stride,
output_shape[2],
height_stride,
output_shape[3],
width_stride,
count);
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) LOG(ERROR) << cudaGetErrorString(error);
}
} // namespace cuda
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_LITE_KERNEL(var_conv_2d,
kCUDA,
kFloat,
kNCHW,
paddle::lite::kernels::cuda::VarConv2DCompute,
def)
.BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindInput("W", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("Col", {LiteType::GetTensorTy(TARGET(kCUDA))})
.Finalize();
|
87dcafdc0506a222ffba4999338a9639aa4b3413.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdexcept>
#include <cmath>
#include "find_block_size.cuh"
#include "../common/user_specified_structures.h"
#include "../common/cuda_error_check.cuh"
// This function does NOT guarantee the best block size. But it tries to come up with the best.
// Be aware that block sizes other than the one this function chooses might end up showing better performance.
// Any suggestions to improve this function will be appreciated.
blockSize_N_pair find_proper_block_size(
const int suggestedBlockSize,
const uint nEdges,
const uint nVertices ) {
// Getting current device properties to properly select block size and N.
int currentDevice;
CUDAErrorCheck( hipGetDevice( ¤tDevice ) );
hipDeviceProp_t deviceProp;
CUDAErrorCheck ( hipGetDeviceProperties( &deviceProp, currentDevice ) );
int maxVerticesPerSM = deviceProp.sharedMemPerBlock / sizeof(Vertex);
int MaxBlockPerSM; // Maximum number of resident blocks per multiprocessor. Not queryable (is it a word??) by CUDA runtime.
// __CUDA_ARCH__ is only defined during device-side compilation, so decide from
// the compute capability queried at runtime instead of preprocessor checks.
if( deviceProp.major < 3 )
  MaxBlockPerSM = 8;
else if( deviceProp.major < 5 )
  MaxBlockPerSM = 16;
else
  MaxBlockPerSM = 32;
// If suggested block size is 0 (user hasn't entered anything), we ignore it.
blockSize_N_pair BS_N;
if( suggestedBlockSize == 0 ) {
int approximated_N = (int)std::sqrt((deviceProp.warpSize * ::pow(nVertices,2))/nEdges); // Please refer to paper for explanation.
//fprintf( stdout, "Approximated N: %d\n", approximated_N);
for( int b_per_SM = 2; b_per_SM<=MaxBlockPerSM; ++b_per_SM ) {
blockSize_N_pair temp_pair;
temp_pair.blockSize = deviceProp.maxThreadsPerMultiProcessor/b_per_SM;
if ( deviceProp.maxThreadsPerMultiProcessor % (temp_pair.blockSize * b_per_SM) != 0 )
continue;
if( temp_pair.blockSize > deviceProp.maxThreadsPerBlock)
continue;
temp_pair.N = maxVerticesPerSM / b_per_SM;
if( temp_pair.N > approximated_N )
BS_N = temp_pair;
}
}
else {
// The behavior is undefined if user-specified block size is not a power of two. Usual block sizes are 1024, 512, 256, and 128.
if( suggestedBlockSize > deviceProp.maxThreadsPerBlock )
throw std::runtime_error( "Specified block size is invalid." );
BS_N.blockSize = suggestedBlockSize;
BS_N.N = ( maxVerticesPerSM * suggestedBlockSize ) / deviceProp.maxThreadsPerMultiProcessor;
}
return BS_N;
}
| 87dcafdc0506a222ffba4999338a9639aa4b3413.cu | #include <stdexcept>
#include <cmath>
#include "find_block_size.cuh"
#include "../common/user_specified_structures.h"
#include "../common/cuda_error_check.cuh"
// This function does NOT guarantee the best block size. But it tries to come up with the best.
// Be aware that block sizes other than the one this function chooses might end up showing better performance.
// Any suggestions to improve this function will be appreciated.
blockSize_N_pair find_proper_block_size(
const int suggestedBlockSize,
const uint nEdges,
const uint nVertices ) {
// Getting current device properties to properly select block size and N.
int currentDevice;
CUDAErrorCheck( cudaGetDevice( ¤tDevice ) );
cudaDeviceProp deviceProp;
CUDAErrorCheck ( cudaGetDeviceProperties( &deviceProp, currentDevice ) );
int maxVerticesPerSM = deviceProp.sharedMemPerBlock / sizeof(Vertex);
int MaxBlockPerSM; // Maximum number of resident blocks per multiprocessor. Not queryable (is it a word??) by CUDA runtime.
// __CUDA_ARCH__ is only defined during device-side compilation, so decide from
// the compute capability queried at runtime instead of preprocessor checks.
if( deviceProp.major < 3 )
  MaxBlockPerSM = 8;
else if( deviceProp.major < 5 )
  MaxBlockPerSM = 16;
else
  MaxBlockPerSM = 32;
// If suggested block size is 0 (user hasn't entered anything), we ignore it.
blockSize_N_pair BS_N;
if( suggestedBlockSize == 0 ) {
int approximated_N = (int)std::sqrt((deviceProp.warpSize * std::pow(nVertices,2))/nEdges); // Please refer to paper for explanation.
//fprintf( stdout, "Approximated N: %d\n", approximated_N);
for( int b_per_SM = 2; b_per_SM<=MaxBlockPerSM; ++b_per_SM ) {
blockSize_N_pair temp_pair;
temp_pair.blockSize = deviceProp.maxThreadsPerMultiProcessor/b_per_SM;
if ( deviceProp.maxThreadsPerMultiProcessor % (temp_pair.blockSize * b_per_SM) != 0 )
continue;
if( temp_pair.blockSize > deviceProp.maxThreadsPerBlock)
continue;
temp_pair.N = maxVerticesPerSM / b_per_SM;
if( temp_pair.N > approximated_N )
BS_N = temp_pair;
}
}
else {
// The behavior is undefined if user-specified block size is not a power of two. Usual block sizes are 1024, 512, 256, and 128.
if( suggestedBlockSize > deviceProp.maxThreadsPerBlock )
throw std::runtime_error( "Specified block size is invalid." );
BS_N.blockSize = suggestedBlockSize;
BS_N.N = ( maxVerticesPerSM * suggestedBlockSize ) / deviceProp.maxThreadsPerMultiProcessor;
}
return BS_N;
}
|
cba0fea02ff9d7f9e9552697511761eb0cca3db1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <GL/glut.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define WIDTH 800
#define HEIGHT 600
int tmax = 100;
int *gpu_t;
int cpu_t[WIDTH*HEIGHT];
double center_x;
double center_y;
double center_dy;
double center_dx;
// int mandel(float, float, int);
void translate(int, int, double*, double*);
__device__ double translatex(int x, float center_x, float center_dx)
{
return x*2.0*center_dx/WIDTH + (center_x - center_dx);
}
__device__ double translatey(int y, float center_y, float center_dy)
{
return y*2.0*center_dy/HEIGHT + (center_y - center_dy);
}
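// Escape-time kernel: the launch uses one block per pixel (grid is WIDTH x HEIGHT,
// one thread per block); iterate z <- z^2 + c until |z|^2 > 4 or tmax is reached,
// then store the iteration count for coloring.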
__global__ void mandel(int* gpu_t, int tmax, float center_x, float center_y, float center_dx, float center_dy)
{
double x = translatex(blockIdx.x, center_x, center_dx);
double y = translatey(blockIdx.y, center_y, center_dy);
double a = 0.0;
double b = 0.0;
int i;
for(i=0; i< tmax; i++)
{
double anew = a*a - b*b + x;
double bnew = 2*a*b + y;
a = anew;
b = bnew;
if(a*a + b*b > 4.0)
break;
}
*(gpu_t + blockIdx.x + WIDTH*blockIdx.y) = i+1;
}
void idlefunc(void)
{
// DO SOME STUFF
}
void displayfunc(void)
{
if(tmax == 0)
tmax = 1;
glClear(GL_COLOR_BUFFER_BIT);
int x;
// int y;
hipMalloc((void**)&gpu_t, HEIGHT*WIDTH*sizeof(int));
// printf("Declared array\n");
dim3 grid(WIDTH, HEIGHT);
hipLaunchKernelGGL(( mandel), dim3(grid), dim3(1), 0, 0, gpu_t, tmax, center_x, center_y, center_dx, center_dy);
// printf("Called the kernel\n");
hipMemcpy(cpu_t, gpu_t, HEIGHT*WIDTH*sizeof(int), hipMemcpyDeviceToHost);
// printf("Copied the data back\n");
for(x=0;x<WIDTH*HEIGHT;x++)
{
glColor3f( 0 , cpu_t[x]*1.0/tmax , 0 ) ; // green, brightness scaled by escape-time count
//
glBegin(GL_POINTS);
glVertex2f(x%WIDTH, x/WIDTH);
glEnd();
// if(x%WIDTH == 0 ) glutSwapBuffers();
}
hipFree(gpu_t);
glutSwapBuffers();
// exit(0);
// printf("Displayed data\n");
}
void reshapefunc(int wscr,int hscr)
{
// HEIGHT = hscr;
// WIDTH = wscr;
glViewport(0,0,(GLsizei)WIDTH,(GLsizei)HEIGHT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0.0,1.0*WIDTH,0.0,1.0*HEIGHT);
glMatrixMode(GL_MODELVIEW);
}
void mousefunc(int button,int state,int xscr,int yscr)
{
if(button==GLUT_LEFT_BUTTON && state==GLUT_DOWN)
{
// printf("Left (%d, %d)\n",xscr,HEIGHT-yscr);
double xnew = 0;
double ynew = 0;
translate(xscr, HEIGHT-yscr, &xnew, &ynew);
center_x = xnew;
center_y = ynew;
center_dx*= 0.7;
center_dy*= 0.7;
printf("(%0.16f, %0.16f. %0.16f, %0.16f)\n", center_x, center_y, center_dx, center_dy);
glutPostRedisplay();
}
else if(button==GLUT_RIGHT_BUTTON && state==GLUT_DOWN)
{
double xnew = 0;
double ynew = 0;
translate(xscr, HEIGHT-yscr, &xnew, &ynew);
center_x = xnew;
center_y = ynew;
center_dx/= 0.7;
center_dy/= 0.7;
printf("(%0.16f, %0.16f. %0.16f, %0.16f)\n", center_x, center_y, center_dx, center_dy);
glutPostRedisplay();
}
}
void motionfunc(int xscr,int yscr)
{
// SHOW MOUSE MOTION
}
void keyfunc(unsigned char key,int xscr,int yscr)
{
if(key == 32)
{
tmax *= 2;
printf("%d\n", tmax);
glutPostRedisplay();
}
}
void specialfunc(int key,int xscr,int yscr)
{
if(key == GLUT_KEY_DOWN)
{
tmax /= 2;
printf("%d\n", tmax);
glutPostRedisplay();
}
if(key == GLUT_KEY_UP)
{
tmax *= 2;
printf("%d\n", tmax);
glutPostRedisplay();
}
}
void closefunc(void)
{
printf("Window closed.\n");
printf("MEM CLEARED\n");
}
int main(int argc, char *argv[])
{
center_x = 0.4099716787079002;
center_y = -0.1482886524476352;
center_dx = 0.0002682137239328;
center_dy = 0.0002011602929496;
tmax = 400;
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
glutInitWindowSize(WIDTH, HEIGHT);
glutInitWindowPosition(100,50);
glutCreateWindow("Mandelbrot Set");
glClearColor(1.0, 1.0, 1.0, 0.0);
glShadeModel(GL_SMOOTH);
glutIdleFunc(idlefunc);
glutDisplayFunc(displayfunc);
glutReshapeFunc(reshapefunc);
glutMouseFunc(mousefunc);
glutMotionFunc(motionfunc);
glutKeyboardFunc(keyfunc);
glutSpecialFunc(specialfunc);
glutMainLoop();
return 0;
}
// int mandel(float x, float y, int tmax)
// {
// float a = 0.0;
// float b = 0.0;
// int i;
// for(i=0; i<tmax; i++)
// {
// float anew = a*a - b*b + x;
// float bnew = 2*a*b + y;
// a = anew;
// b = bnew;
// if(a*a + b*b > 4.0)
// break;
// }
// return (i+1);
// }
void translate(int x, int y, double* a, double* b)
{
*a = x*2.0*center_dx/WIDTH + (center_x - center_dx);
*b = y*2.0*center_dy/HEIGHT + (center_y - center_dy);
}
| cba0fea02ff9d7f9e9552697511761eb0cca3db1.cu | #include <stdio.h>
#include <GL/glut.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define WIDTH 800
#define HEIGHT 600
int tmax = 100;
int *gpu_t;
int cpu_t[WIDTH*HEIGHT];
double center_x;
double center_y;
double center_dy;
double center_dx;
// int mandel(float, float, int);
void translate(int, int, double*, double*);
__device__ double translatex(int x, float center_x, float center_dx)
{
return x*2.0*center_dx/WIDTH + (center_x - center_dx);
}
__device__ double translatey(int y, float center_y, float center_dy)
{
return y*2.0*center_dy/HEIGHT + (center_y - center_dy);
}
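// Escape-time kernel: the launch uses one block per pixel (grid is WIDTH x HEIGHT,
// one thread per block); iterate z <- z^2 + c until |z|^2 > 4 or tmax is reached,
// then store the iteration count for coloring.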
__global__ void mandel(int* gpu_t, int tmax, float center_x, float center_y, float center_dx, float center_dy)
{
double x = translatex(blockIdx.x, center_x, center_dx);
double y = translatey(blockIdx.y, center_y, center_dy);
double a = 0.0;
double b = 0.0;
int i;
for(i=0; i< tmax; i++)
{
double anew = a*a - b*b + x;
double bnew = 2*a*b + y;
a = anew;
b = bnew;
if(a*a + b*b > 4.0)
break;
}
*(gpu_t + blockIdx.x + WIDTH*blockIdx.y) = i+1;
}
void idlefunc(void)
{
// DO SOME STUFF
}
void displayfunc(void)
{
if(tmax == 0)
tmax = 1;
glClear(GL_COLOR_BUFFER_BIT);
int x;
// int y;
cudaMalloc((void**)&gpu_t, HEIGHT*WIDTH*sizeof(int));
// printf("Declared array\n");
dim3 grid(WIDTH, HEIGHT);
mandel<<<grid, 1>>>(gpu_t, tmax, center_x, center_y, center_dx, center_dy);
// printf("Called the kernel\n");
cudaMemcpy(cpu_t, gpu_t, HEIGHT*WIDTH*sizeof(int), cudaMemcpyDeviceToHost);
// printf("Copied the data back\n");
for(x=0;x<WIDTH*HEIGHT;x++)
{
glColor3f( 0 , cpu_t[x]*1.0/tmax , 0 ) ; // green, brightness scaled by escape-time count
//
glBegin(GL_POINTS);
glVertex2f(x%WIDTH, x/WIDTH);
glEnd();
// if(x%WIDTH == 0 ) glutSwapBuffers();
}
cudaFree(gpu_t);
glutSwapBuffers();
// exit(0);
// printf("Displayed data\n");
}
void reshapefunc(int wscr,int hscr)
{
// HEIGHT = hscr;
// WIDTH = wscr;
glViewport(0,0,(GLsizei)WIDTH,(GLsizei)HEIGHT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0.0,1.0*WIDTH,0.0,1.0*HEIGHT);
glMatrixMode(GL_MODELVIEW);
}
void mousefunc(int button,int state,int xscr,int yscr)
{
if(button==GLUT_LEFT_BUTTON && state==GLUT_DOWN)
{
// printf("Left (%d, %d)\n",xscr,HEIGHT-yscr);
double xnew = 0;
double ynew = 0;
translate(xscr, HEIGHT-yscr, &xnew, &ynew);
center_x = xnew;
center_y = ynew;
center_dx*= 0.7;
center_dy*= 0.7;
printf("(%0.16f, %0.16f. %0.16f, %0.16f)\n", center_x, center_y, center_dx, center_dy);
glutPostRedisplay();
}
else if(button==GLUT_RIGHT_BUTTON && state==GLUT_DOWN)
{
double xnew = 0;
double ynew = 0;
translate(xscr, HEIGHT-yscr, &xnew, &ynew);
center_x = xnew;
center_y = ynew;
center_dx/= 0.7;
center_dy/= 0.7;
printf("(%0.16f, %0.16f. %0.16f, %0.16f)\n", center_x, center_y, center_dx, center_dy);
glutPostRedisplay();
}
}
void motionfunc(int xscr,int yscr)
{
// SHOW MOUSE MOTION
}
void keyfunc(unsigned char key,int xscr,int yscr)
{
if(key == 32)
{
tmax *= 2;
printf("%d\n", tmax);
glutPostRedisplay();
}
}
void specialfunc(int key,int xscr,int yscr)
{
if(key == GLUT_KEY_DOWN)
{
tmax /= 2;
printf("%d\n", tmax);
glutPostRedisplay();
}
if(key == GLUT_KEY_UP)
{
tmax *= 2;
printf("%d\n", tmax);
glutPostRedisplay();
}
}
void closefunc(void)
{
printf("Window closed.\n");
printf("MEM CLEARED\n");
}
int main(int argc, char *argv[])
{
center_x = 0.4099716787079002;
center_y = -0.1482886524476352;
center_dx = 0.0002682137239328;
center_dy = 0.0002011602929496;
tmax = 400;
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
glutInitWindowSize(WIDTH, HEIGHT);
glutInitWindowPosition(100,50);
glutCreateWindow("Mandelbrot Set");
glClearColor(1.0, 1.0, 1.0, 0.0);
glShadeModel(GL_SMOOTH);
glutIdleFunc(idlefunc);
glutDisplayFunc(displayfunc);
glutReshapeFunc(reshapefunc);
glutMouseFunc(mousefunc);
glutMotionFunc(motionfunc);
glutKeyboardFunc(keyfunc);
glutSpecialFunc(specialfunc);
glutMainLoop();
return 0;
}
// int mandel(float x, float y, int tmax)
// {
// float a = 0.0;
// float b = 0.0;
// int i;
// for(i=0; i<tmax; i++)
// {
// float anew = a*a - b*b + x;
// float bnew = 2*a*b + y;
// a = anew;
// b = bnew;
// if(a*a + b*b > 4.0)
// break;
// }
// return (i+1);
// }
void translate(int x, int y, double* a, double* b)
{
*a = x*2.0*center_dx/WIDTH + (center_x - center_dx);
*b = y*2.0*center_dy/HEIGHT + (center_y - center_dy);
}
|
eb908069c28d9cff9d39cbb2945fb48b3e4e9ded.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include "cudarray/common.hpp"
#include "cudarray/nnet/pool_b01.hpp"
namespace cudarray {
// The implementations below are inspired by those found in the Caffe framework
template <typename T>
__global__ void max_pool_b01(int n_threads, const T* imgs,
int img_h, int img_w, int poolout_h, int poolout_w, int win_h, int win_w,
int pad_y, int pad_x, int stride_y, int stride_x, T* poolout, int* mask) {
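// Grid-stride loop over pooled outputs: each iteration scans one pooling window,
// writing the max value to poolout and the flat argmax index (h * img_w + w) to
// mask for use in the backward pass.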
CUDA_GRID_STRIDE_LOOP(idx, n_threads) {
int poolout_x = idx % poolout_w;
int poolout_y = (idx / poolout_w) % poolout_h;
int n = idx / poolout_w / poolout_h;
int img_y_start = poolout_y * stride_y - pad_y;
int img_x_start = poolout_x * stride_x - pad_x;
int img_y_end = min(img_y_start + win_h, img_h);
int img_x_end = min(img_x_start + win_w, img_w);
img_y_start = max(img_y_start, 0);
img_x_start = max(img_x_start, 0);
T maxval = -FLT_MAX;
int maxidx = -1;
imgs += n * img_h * img_w;
for (int h = img_y_start; h < img_y_end; ++h) {
for (int w = img_x_start; w < img_x_end; ++w) {
if (imgs[h * img_w + w] > maxval) {
maxidx = h * img_w + w;
maxval = imgs[maxidx];
}
}
}
poolout[idx] = maxval;
mask[idx] = maxidx;
}
}
template <typename T>
void max_pool_b01(const T* imgs, int n_imgs, int img_h, int img_w, int win_h,
int win_w, int pad_y, int pad_x, int stride_y, int stride_x, T* poolout,
int* mask) {
int poolout_h = (img_h + 2*pad_y - win_h) / stride_y + 1;
int poolout_w = (img_w + 2*pad_x - win_w) / stride_x + 1;
int n_threads = n_imgs * poolout_h * poolout_w;
hipLaunchKernelGGL(( max_pool_b01), dim3(cuda_blocks(n_threads)), dim3(kNumBlockThreads), 0, 0,
n_threads, imgs, img_h, img_w, poolout_h, poolout_w, win_h, win_w, pad_y,
pad_x, stride_y, stride_x, poolout, mask);
CUDA_KERNEL_CHECK;
}
template void max_pool_b01<float>(const float* imgs, int n_imgs, int img_h,
int img_w, int win_h, int win_w, int pad_y, int pad_x, int stride_y,
int stride_x, float* poolout, int* mask);
template <typename T>
__global__ void max_pool_b01_bprob(int n_threads, const T* poolout_d,
const int* mask, int img_h, int img_w, int poolout_h, int poolout_w,
int win_h, int win_w, int pad_y, int pad_x, int stride_y, int stride_x,
T* imgs_d) {
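// Grid-stride loop over input pixels: accumulate the upstream gradient from every
// pooling window whose recorded argmax is this pixel.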
CUDA_GRID_STRIDE_LOOP(idx, n_threads) {
int img_x = idx % img_w;
int img_y = (idx / img_w) % img_h;
int n = idx / img_w / img_h;
int poolout_y_start = (img_y + pad_y < win_h)
? 0 : (img_y + pad_y - win_h) / stride_y + 1;
int poolout_y_end = min((img_y + pad_y) / stride_y + 1, poolout_h);
int poolout_x_start = (img_x + pad_x < win_w)
? 0 : (img_x + pad_x - win_w) / stride_x + 1;
int poolout_x_end = min((img_x + pad_x) / stride_x + 1, poolout_w);
int offset = n * poolout_h * poolout_w;
poolout_d += offset;
mask += offset;
T gradient = 0;
for (int ph = poolout_y_start; ph < poolout_y_end; ++ph) {
for (int pw = poolout_x_start; pw < poolout_x_end; ++pw) {
if (mask[ph * poolout_w + pw] == img_y * img_w + img_x) {
gradient += poolout_d[ph * poolout_w + pw];
}
}
}
imgs_d[idx] = gradient;
}
}
template <typename T>
void max_pool_b01_bprob(const T* poolout_d, const int* mask, int n_imgs,
int img_h, int img_w, int win_h, int win_w, int pad_y, int pad_x,
int stride_y, int stride_x, T* imgs_d) {
int poolout_h = (img_h + 2*pad_y - win_h) / stride_y + 1;
int poolout_w = (img_w + 2*pad_x - win_w) / stride_x + 1;
int n_threads = n_imgs * img_h * img_w;
hipLaunchKernelGGL(( max_pool_b01_bprob), dim3(cuda_blocks(n_threads)), dim3(kNumBlockThreads), 0, 0,
n_threads, poolout_d, mask, img_h, img_w, poolout_h, poolout_w, win_h,
win_w, pad_y, pad_x, stride_y, stride_x, imgs_d);
CUDA_KERNEL_CHECK;
}
template void max_pool_b01_bprob(const float* poolout_d, const int* mask,
int n_imgs, int img_h, int img_w, int win_h, int win_w, int pad_y,
int pad_x, int stride_y, int stride_x, float* imgs_d);
template <typename T>
__global__ void avg_pool_b01(int n_threads, const T* imgs,
int img_h, int img_w, int poolout_h, int poolout_w, int win_h, int win_w,
int pad_y, int pad_x, int stride_y, int stride_x, T* poolout) {
CUDA_GRID_STRIDE_LOOP(idx, n_threads) {
int poolout_x = idx % poolout_w;
int poolout_y = (idx / poolout_w) % poolout_h;
int n = idx / poolout_w / poolout_h;
int img_y_start = poolout_y * stride_y - pad_y;
int img_x_start = poolout_x * stride_x - pad_x;
int img_y_end = min(img_y_start + win_h, img_h);
int img_x_end = min(img_x_start + win_w, img_w);
img_y_start = max(img_y_start, 0);
img_x_start = max(img_x_start, 0);
T sum = 0;
imgs += n * img_h * img_w;
for (int h = img_y_start; h < img_y_end; ++h) {
for (int w = img_x_start; w < img_x_end; ++w) {
sum += imgs[h * img_w + w];
}
}
poolout[idx] = sum / (win_h*win_w);
}
}
template <typename T>
void avg_pool_b01(const T* imgs, int n_imgs, int img_h, int img_w, int win_h,
int win_w, int pad_y, int pad_x, int stride_y, int stride_x, T* poolout) {
int poolout_h = (img_h + 2*pad_y - win_h) / stride_y + 1;
int poolout_w = (img_w + 2*pad_x - win_w) / stride_x + 1;
int n_threads = n_imgs * poolout_h * poolout_w;
hipLaunchKernelGGL(( avg_pool_b01), dim3(cuda_blocks(n_threads)), dim3(kNumBlockThreads), 0, 0,
n_threads, imgs, img_h, img_w, poolout_h, poolout_w, win_h, win_w, pad_y,
pad_x, stride_y, stride_x, poolout);
CUDA_KERNEL_CHECK;
}
template void avg_pool_b01<float>(const float* imgs, int n_imgs, int img_h,
int img_w, int win_h, int win_w, int pad_y, int pad_x, int stride_y,
int stride_x, float* poolout);
template <typename T>
__global__ void avg_pool_b01_bprob(int n_threads, const T* poolout_d,
int img_h, int img_w, int poolout_h, int poolout_w, int win_h, int win_w,
int pad_y, int pad_x, int stride_y, int stride_x, T* imgs_d) {
CUDA_GRID_STRIDE_LOOP(idx, n_threads) {
int img_x = idx % img_w;
int img_y = (idx / img_w) % img_h;
int n = idx / img_w / img_h;
int poolout_y_start = (img_y + pad_y < win_h)
? 0 : (img_y + pad_y - win_h) / stride_y + 1;
int poolout_y_end = min((img_y + pad_y) / stride_y + 1, poolout_h);
int poolout_x_start = (img_x + pad_x < win_w)
? 0 : (img_x + pad_x - win_w) / stride_x + 1;
int poolout_x_end = min((img_x + pad_x) / stride_x + 1, poolout_w);
int offset = n * poolout_h * poolout_w;
poolout_d += offset;
T gradient = 0;
for (int ph = poolout_y_start; ph < poolout_y_end; ++ph) {
for (int pw = poolout_x_start; pw < poolout_x_end; ++pw) {
gradient += poolout_d[ph * poolout_w + pw];
}
}
imgs_d[idx] = gradient / (win_h * win_w);
}
}
template <typename T>
void avg_pool_b01_bprob(const T* poolout_d, int n_imgs, int img_h, int img_w,
int win_h, int win_w, int pad_y, int pad_x, int stride_y, int stride_x,
T* imgs_d) {
int poolout_h = (img_h + 2*pad_y - win_h) / stride_y + 1;
int poolout_w = (img_w + 2*pad_x - win_w) / stride_x + 1;
int n_threads = n_imgs * img_h * img_w;
hipLaunchKernelGGL(( avg_pool_b01_bprob), dim3(cuda_blocks(n_threads)), dim3(kNumBlockThreads), 0, 0,
n_threads, poolout_d, img_h, img_w, poolout_h, poolout_w, win_h,
win_w, pad_y, pad_x, stride_y, stride_x, imgs_d);
CUDA_KERNEL_CHECK;
}
template void avg_pool_b01_bprob(const float* poolout_d, int n_imgs, int img_h,
int img_w, int win_h, int win_w, int pad_y, int pad_x, int stride_y,
int stride_x, float* imgs_d);
}
| eb908069c28d9cff9d39cbb2945fb48b3e4e9ded.cu | #include <cfloat>
#include "cudarray/common.hpp"
#include "cudarray/nnet/pool_b01.hpp"
namespace cudarray {
// The implementations below are inspired by those found in the Caffe framework
template <typename T>
__global__ void max_pool_b01(int n_threads, const T* imgs,
int img_h, int img_w, int poolout_h, int poolout_w, int win_h, int win_w,
int pad_y, int pad_x, int stride_y, int stride_x, T* poolout, int* mask) {
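// Grid-stride loop over pooled outputs: each iteration scans one pooling window,
// writing the max value to poolout and the flat argmax index (h * img_w + w) to
// mask for use in the backward pass.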
CUDA_GRID_STRIDE_LOOP(idx, n_threads) {
int poolout_x = idx % poolout_w;
int poolout_y = (idx / poolout_w) % poolout_h;
int n = idx / poolout_w / poolout_h;
int img_y_start = poolout_y * stride_y - pad_y;
int img_x_start = poolout_x * stride_x - pad_x;
int img_y_end = min(img_y_start + win_h, img_h);
int img_x_end = min(img_x_start + win_w, img_w);
img_y_start = max(img_y_start, 0);
img_x_start = max(img_x_start, 0);
T maxval = -FLT_MAX;
int maxidx = -1;
imgs += n * img_h * img_w;
for (int h = img_y_start; h < img_y_end; ++h) {
for (int w = img_x_start; w < img_x_end; ++w) {
if (imgs[h * img_w + w] > maxval) {
maxidx = h * img_w + w;
maxval = imgs[maxidx];
}
}
}
poolout[idx] = maxval;
mask[idx] = maxidx;
}
}
template <typename T>
void max_pool_b01(const T* imgs, int n_imgs, int img_h, int img_w, int win_h,
int win_w, int pad_y, int pad_x, int stride_y, int stride_x, T* poolout,
int* mask) {
int poolout_h = (img_h + 2*pad_y - win_h) / stride_y + 1;
int poolout_w = (img_w + 2*pad_x - win_w) / stride_x + 1;
int n_threads = n_imgs * poolout_h * poolout_w;
max_pool_b01<<<cuda_blocks(n_threads), kNumBlockThreads>>>(
n_threads, imgs, img_h, img_w, poolout_h, poolout_w, win_h, win_w, pad_y,
pad_x, stride_y, stride_x, poolout, mask);
CUDA_KERNEL_CHECK;
}
template void max_pool_b01<float>(const float* imgs, int n_imgs, int img_h,
int img_w, int win_h, int win_w, int pad_y, int pad_x, int stride_y,
int stride_x, float* poolout, int* mask);
template <typename T>
__global__ void max_pool_b01_bprob(int n_threads, const T* poolout_d,
const int* mask, int img_h, int img_w, int poolout_h, int poolout_w,
int win_h, int win_w, int pad_y, int pad_x, int stride_y, int stride_x,
T* imgs_d) {
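// Grid-stride loop over input pixels: accumulate the upstream gradient from every
// pooling window whose recorded argmax is this pixel.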
CUDA_GRID_STRIDE_LOOP(idx, n_threads) {
int img_x = idx % img_w;
int img_y = (idx / img_w) % img_h;
int n = idx / img_w / img_h;
int poolout_y_start = (img_y + pad_y < win_h)
? 0 : (img_y + pad_y - win_h) / stride_y + 1;
int poolout_y_end = min((img_y + pad_y) / stride_y + 1, poolout_h);
int poolout_x_start = (img_x + pad_x < win_w)
? 0 : (img_x + pad_x - win_w) / stride_x + 1;
int poolout_x_end = min((img_x + pad_x) / stride_x + 1, poolout_w);
int offset = n * poolout_h * poolout_w;
poolout_d += offset;
mask += offset;
T gradient = 0;
for (int ph = poolout_y_start; ph < poolout_y_end; ++ph) {
for (int pw = poolout_x_start; pw < poolout_x_end; ++pw) {
if (mask[ph * poolout_w + pw] == img_y * img_w + img_x) {
gradient += poolout_d[ph * poolout_w + pw];
}
}
}
imgs_d[idx] = gradient;
}
}
template <typename T>
void max_pool_b01_bprob(const T* poolout_d, const int* mask, int n_imgs,
int img_h, int img_w, int win_h, int win_w, int pad_y, int pad_x,
int stride_y, int stride_x, T* imgs_d) {
int poolout_h = (img_h + 2*pad_y - win_h) / stride_y + 1;
int poolout_w = (img_w + 2*pad_x - win_w) / stride_x + 1;
int n_threads = n_imgs * img_h * img_w;
max_pool_b01_bprob<<<cuda_blocks(n_threads), kNumBlockThreads>>>(
n_threads, poolout_d, mask, img_h, img_w, poolout_h, poolout_w, win_h,
win_w, pad_y, pad_x, stride_y, stride_x, imgs_d);
CUDA_KERNEL_CHECK;
}
template void max_pool_b01_bprob(const float* poolout_d, const int* mask,
int n_imgs, int img_h, int img_w, int win_h, int win_w, int pad_y,
int pad_x, int stride_y, int stride_x, float* imgs_d);
template <typename T>
__global__ void avg_pool_b01(int n_threads, const T* imgs,
int img_h, int img_w, int poolout_h, int poolout_w, int win_h, int win_w,
int pad_y, int pad_x, int stride_y, int stride_x, T* poolout) {
CUDA_GRID_STRIDE_LOOP(idx, n_threads) {
int poolout_x = idx % poolout_w;
int poolout_y = (idx / poolout_w) % poolout_h;
int n = idx / poolout_w / poolout_h;
int img_y_start = poolout_y * stride_y - pad_y;
int img_x_start = poolout_x * stride_x - pad_x;
int img_y_end = min(img_y_start + win_h, img_h);
int img_x_end = min(img_x_start + win_w, img_w);
img_y_start = max(img_y_start, 0);
img_x_start = max(img_x_start, 0);
T sum = 0;
imgs += n * img_h * img_w;
for (int h = img_y_start; h < img_y_end; ++h) {
for (int w = img_x_start; w < img_x_end; ++w) {
sum += imgs[h * img_w + w];
}
}
poolout[idx] = sum / (win_h*win_w);
}
}
template <typename T>
void avg_pool_b01(const T* imgs, int n_imgs, int img_h, int img_w, int win_h,
int win_w, int pad_y, int pad_x, int stride_y, int stride_x, T* poolout) {
int poolout_h = (img_h + 2*pad_y - win_h) / stride_y + 1;
int poolout_w = (img_w + 2*pad_x - win_w) / stride_x + 1;
int n_threads = n_imgs * poolout_h * poolout_w;
avg_pool_b01<<<cuda_blocks(n_threads), kNumBlockThreads>>>(
n_threads, imgs, img_h, img_w, poolout_h, poolout_w, win_h, win_w, pad_y,
pad_x, stride_y, stride_x, poolout);
CUDA_KERNEL_CHECK;
}
template void avg_pool_b01<float>(const float* imgs, int n_imgs, int img_h,
int img_w, int win_h, int win_w, int pad_y, int pad_x, int stride_y,
int stride_x, float* poolout);
template <typename T>
__global__ void avg_pool_b01_bprob(int n_threads, const T* poolout_d,
int img_h, int img_w, int poolout_h, int poolout_w, int win_h, int win_w,
int pad_y, int pad_x, int stride_y, int stride_x, T* imgs_d) {
CUDA_GRID_STRIDE_LOOP(idx, n_threads) {
int img_x = idx % img_w;
int img_y = (idx / img_w) % img_h;
int n = idx / img_w / img_h;
int poolout_y_start = (img_y + pad_y < win_h)
? 0 : (img_y + pad_y - win_h) / stride_y + 1;
int poolout_y_end = min((img_y + pad_y) / stride_y + 1, poolout_h);
int poolout_x_start = (img_x + pad_x < win_w)
? 0 : (img_x + pad_x - win_w) / stride_x + 1;
int poolout_x_end = min((img_x + pad_x) / stride_x + 1, poolout_w);
int offset = n * poolout_h * poolout_w;
poolout_d += offset;
T gradient = 0;
for (int ph = poolout_y_start; ph < poolout_y_end; ++ph) {
for (int pw = poolout_x_start; pw < poolout_x_end; ++pw) {
gradient += poolout_d[ph * poolout_w + pw];
}
}
imgs_d[idx] = gradient / (win_h * win_w);
}
}
template <typename T>
void avg_pool_b01_bprob(const T* poolout_d, int n_imgs, int img_h, int img_w,
int win_h, int win_w, int pad_y, int pad_x, int stride_y, int stride_x,
T* imgs_d) {
int poolout_h = (img_h + 2*pad_y - win_h) / stride_y + 1;
int poolout_w = (img_w + 2*pad_x - win_w) / stride_x + 1;
int n_threads = n_imgs * img_h * img_w;
avg_pool_b01_bprob<<<cuda_blocks(n_threads), kNumBlockThreads>>>(
n_threads, poolout_d, img_h, img_w, poolout_h, poolout_w, win_h,
win_w, pad_y, pad_x, stride_y, stride_x, imgs_d);
CUDA_KERNEL_CHECK;
}
template void avg_pool_b01_bprob(const float* poolout_d, int n_imgs, int img_h,
int img_w, int win_h, int win_w, int pad_y, int pad_x, int stride_y,
int stride_x, float* imgs_d);
}
|
ea49c6d2ac6b5814251ef1c4647d0803b175d374.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
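// Naive matrix multiply: each thread computes one element of C as the dot
// product of a row of A and a column of B (n x n, row-major).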
__global__ void matrix_multiply(const int* d_a, const int* d_b, int* d_c, const int n)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int temp = 0;
if ((row < n) && (col < n)) {
for (int k = 0; k < n; k++) {
temp += d_a[row * n + k] * d_b[k * n + col];
}
d_c[row * n + col] = temp;
}
};
int main(int argc, char** argv)
{
const int n = atoi(argv[1]);
size_t bytes = n * n * sizeof(int);
int* h_a = new int[n * n];
int* h_b = new int[n * n];
int* h_c = new int[n * n];
int *d_a, *d_b, *d_c;
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
//Matrix initialization
for (int row = 0; row < n; row++) {
for (int col = 0; col < n; col++) {
h_a[row * n + col] = row;
}
}
for (int row = 0; row < n; row++) {
for (int col = 0; col < n; col++) {
h_b[row * n + col] = row;
}
}
//Host ---> Device ( h_a -> d_a )
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
const int BLOCK_SIZE = 32;
const int GRID_SIZE = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 grid(GRID_SIZE, GRID_SIZE, 1);
dim3 threads(BLOCK_SIZE, BLOCK_SIZE, 1);
// Matrix Multiplication - CUDA KERNEL
hipLaunchKernelGGL(( matrix_multiply), dim3(grid), dim3(threads), 0, 0, d_a, d_b, d_c, n);
//Device --> Host (d_c -> h_c)
hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost);
std::cout << "\nResult of matrix multiplication on GPU: \n";
//Print only if n < 20
if (n < 20 && h_c != NULL) {
for (int row = 0; row < n; row++) {
for (int col = 0; col < n; col++) {
std::cout << "\t" << h_c[row * n + col];
}
std::cout << "\n";
}
}
std::cout << "\n";
//Completed GPU computation
std::cout << "The Matrix multiplication of size n = " << n << " was completed \n\n";
// Free Host memory
delete[] h_a;
h_a = NULL;
delete[] h_b;
h_b = NULL;
delete[] h_c;
h_c = NULL;
// Free Device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
| ea49c6d2ac6b5814251ef1c4647d0803b175d374.cu | #include <iostream>
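// Naive matrix multiply: each thread computes one element of C as the dot
// product of a row of A and a column of B (n x n, row-major).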
__global__ void matrix_multiply(const int* d_a, const int* d_b, int* d_c, const int n)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int temp = 0;
if ((row < n) && (col < n)) {
for (int k = 0; k < n; k++) {
temp += d_a[row * n + k] * d_b[k * n + col];
}
d_c[row * n + col] = temp;
}
};
int main(int argc, char** argv)
{
const int n = atoi(argv[1]);
size_t bytes = n * n * sizeof(int);
int* h_a = new int[n * n];
int* h_b = new int[n * n];
int* h_c = new int[n * n];
int *d_a, *d_b, *d_c;
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
//Matrix initialization
for (int row = 0; row < n; row++) {
for (int col = 0; col < n; col++) {
h_a[row * n + col] = row;
}
}
for (int row = 0; row < n; row++) {
for (int col = 0; col < n; col++) {
h_b[row * n + col] = row;
}
}
//Host ---> Device ( h_a -> d_a )
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
const int BLOCK_SIZE = 32;
const int GRID_SIZE = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 grid(GRID_SIZE, GRID_SIZE, 1);
dim3 threads(BLOCK_SIZE, BLOCK_SIZE, 1);
// Matrix Multiplication - CUDA KERNEL
matrix_multiply<<<grid, threads>>>(d_a, d_b, d_c, n);
//Device --> Host (d_c -> h_c)
cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
std::cout << "\nResult of matrix multiplication on GPU: \n";
//Print only if n < 20
if (n < 20 && h_c != NULL) {
for (int row = 0; row < n; row++) {
for (int col = 0; col < n; col++) {
std::cout << "\t" << h_c[row * n + col];
}
std::cout << "\n";
}
}
std::cout << "\n";
//Completed GPU computation
std::cout << "The Matrix multiplication of size n = " << n << " was completed \n\n";
// Free Host memory
delete[] h_a;
h_a = NULL;
delete[] h_b;
h_b = NULL;
delete[] h_c;
h_c = NULL;
// Free Device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
71c4fbd05d66455c7c89ce0e2990fa37d8ee7a4b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaCrop.h"
// gpuCrop
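// Copies one output pixel per thread from the (offsetX, offsetY)-shifted position
// in the input, packing the ROI into a contiguous output image.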
template<typename T>
__global__ void gpuCrop( T* input, T* output, int offsetX, int offsetY,
int inWidth, int outWidth, int outHeight )
{
const int out_x = blockIdx.x * blockDim.x + threadIdx.x;
const int out_y = blockIdx.y * blockDim.y + threadIdx.y;
if( out_x >= outWidth || out_y >= outHeight )
return;
const int in_x = out_x + offsetX;
const int in_y = out_y + offsetY;
output[out_y * outWidth + out_x] = input[in_y * inWidth + in_x];
}
// launchCrop
template<typename T>
static hipError_t launchCrop( T* input, T* output, const int4& roi, size_t inputWidth, size_t inputHeight )
{
if( !input || !output )
return hipErrorInvalidDevicePointer;
if( inputWidth == 0 || inputHeight == 0 )
return hipErrorInvalidValue;
// get the ROI/output dimensions
const int outputWidth = roi.z - roi.x;
const int outputHeight = roi.w - roi.y;
// validate the requested ROI
if( outputWidth <= 0 || outputHeight <= 0 )
return hipErrorInvalidValue;
if( outputWidth > inputWidth || outputHeight > inputHeight )
return hipErrorInvalidValue;
if( roi.x < 0 || roi.y < 0 || roi.z < 0 || roi.w < 0 )
return hipErrorInvalidValue;
if( roi.z >= inputWidth || roi.w >= inputHeight )
return hipErrorInvalidValue;
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
hipLaunchKernelGGL(( gpuCrop<T>), dim3(gridDim), dim3(blockDim), 0, 0, input, output, roi.x, roi.y, inputWidth, outputWidth, outputHeight);
return CUDA(hipGetLastError());
}
// cudaCrop (uint8 grayscale)
hipError_t cudaCrop( uint8_t* input, uint8_t* output, const int4& roi, size_t inputWidth, size_t inputHeight )
{
return launchCrop<uint8_t>(input, output, roi, inputWidth, inputHeight);
}
// cudaCrop (float grayscale)
hipError_t cudaCrop( float* input, float* output, const int4& roi, size_t inputWidth, size_t inputHeight )
{
return launchCrop<float>(input, output, roi, inputWidth, inputHeight);
}
// cudaCrop (uchar3)
hipError_t cudaCrop( uchar3* input, uchar3* output, const int4& roi, size_t inputWidth, size_t inputHeight )
{
return launchCrop<uchar3>(input, output, roi, inputWidth, inputHeight);
}
// cudaCrop (uchar4)
hipError_t cudaCrop( uchar4* input, uchar4* output, const int4& roi, size_t inputWidth, size_t inputHeight )
{
return launchCrop<uchar4>(input, output, roi, inputWidth, inputHeight);
}
// cudaCrop (float3)
hipError_t cudaCrop( float3* input, float3* output, const int4& roi, size_t inputWidth, size_t inputHeight )
{
return launchCrop<float3>(input, output, roi, inputWidth, inputHeight);
}
// cudaCrop (float4)
hipError_t cudaCrop( float4* input, float4* output, const int4& roi, size_t inputWidth, size_t inputHeight )
{
return launchCrop<float4>(input, output, roi, inputWidth, inputHeight);
}
//-----------------------------------------------------------------------------------
hipError_t cudaCrop( void* input, void* output, const int4& roi, size_t inputWidth, size_t inputHeight, imageFormat format )
{
if( format == IMAGE_RGB8 || format == IMAGE_BGR8 )
return cudaCrop((uchar3*)input, (uchar3*)output, roi, inputWidth, inputHeight);
else if( format == IMAGE_RGBA8 || format == IMAGE_BGRA8 )
return cudaCrop((uchar4*)input, (uchar4*)output, roi, inputWidth, inputHeight);
else if( format == IMAGE_RGB32F || format == IMAGE_BGR32F )
return cudaCrop((float3*)input, (float3*)output, roi, inputWidth, inputHeight);
else if( format == IMAGE_RGBA32F || format == IMAGE_BGRA32F )
return cudaCrop((float4*)input, (float4*)output, roi, inputWidth, inputHeight);
else if( format == IMAGE_GRAY8 )
return cudaCrop((uint8_t*)input, (uint8_t*)output, roi, inputWidth, inputHeight);
else if( format == IMAGE_GRAY32F )
return cudaCrop((float*)input, (float*)output, roi, inputWidth, inputHeight);
LogError(LOG_CUDA "cudaCrop() -- invalid image format '%s'\n", imageFormatToStr(format));
LogError(LOG_CUDA " supported formats are:\n");
LogError(LOG_CUDA " * gray8\n");
LogError(LOG_CUDA " * gray32f\n");
LogError(LOG_CUDA " * rgb8, bgr8\n");
LogError(LOG_CUDA " * rgba8, bgra8\n");
LogError(LOG_CUDA " * rgb32f, bgr32f\n");
LogError(LOG_CUDA " * rgba32f, bgra32f\n");
return hipErrorInvalidValue;
}
| 71c4fbd05d66455c7c89ce0e2990fa37d8ee7a4b.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaCrop.h"
// gpuCrop
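// Copies one output pixel per thread from the (offsetX, offsetY)-shifted position
// in the input, packing the ROI into a contiguous output image.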
template<typename T>
__global__ void gpuCrop( T* input, T* output, int offsetX, int offsetY,
int inWidth, int outWidth, int outHeight )
{
const int out_x = blockIdx.x * blockDim.x + threadIdx.x;
const int out_y = blockIdx.y * blockDim.y + threadIdx.y;
if( out_x >= outWidth || out_y >= outHeight )
return;
const int in_x = out_x + offsetX;
const int in_y = out_y + offsetY;
output[out_y * outWidth + out_x] = input[in_y * inWidth + in_x];
}
// launchCrop
template<typename T>
static cudaError_t launchCrop( T* input, T* output, const int4& roi, size_t inputWidth, size_t inputHeight )
{
if( !input || !output )
return cudaErrorInvalidDevicePointer;
if( inputWidth == 0 || inputHeight == 0 )
return cudaErrorInvalidValue;
// get the ROI/output dimensions
const int outputWidth = roi.z - roi.x;
const int outputHeight = roi.w - roi.y;
// validate the requested ROI
if( outputWidth <= 0 || outputHeight <= 0 )
return cudaErrorInvalidValue;
if( outputWidth > inputWidth || outputHeight > inputHeight )
return cudaErrorInvalidValue;
if( roi.x < 0 || roi.y < 0 || roi.z < 0 || roi.w < 0 )
return cudaErrorInvalidValue;
if( roi.z >= inputWidth || roi.w >= inputHeight )
return cudaErrorInvalidValue;
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
gpuCrop<T><<<gridDim, blockDim>>>(input, output, roi.x, roi.y, inputWidth, outputWidth, outputHeight);
return CUDA(cudaGetLastError());
}
// cudaCrop (uint8 grayscale)
cudaError_t cudaCrop( uint8_t* input, uint8_t* output, const int4& roi, size_t inputWidth, size_t inputHeight )
{
return launchCrop<uint8_t>(input, output, roi, inputWidth, inputHeight);
}
// cudaCrop (float grayscale)
cudaError_t cudaCrop( float* input, float* output, const int4& roi, size_t inputWidth, size_t inputHeight )
{
return launchCrop<float>(input, output, roi, inputWidth, inputHeight);
}
// cudaCrop (uchar3)
cudaError_t cudaCrop( uchar3* input, uchar3* output, const int4& roi, size_t inputWidth, size_t inputHeight )
{
return launchCrop<uchar3>(input, output, roi, inputWidth, inputHeight);
}
// cudaCrop (uchar4)
cudaError_t cudaCrop( uchar4* input, uchar4* output, const int4& roi, size_t inputWidth, size_t inputHeight )
{
return launchCrop<uchar4>(input, output, roi, inputWidth, inputHeight);
}
// cudaCrop (float3)
cudaError_t cudaCrop( float3* input, float3* output, const int4& roi, size_t inputWidth, size_t inputHeight )
{
return launchCrop<float3>(input, output, roi, inputWidth, inputHeight);
}
// cudaCrop (float4)
cudaError_t cudaCrop( float4* input, float4* output, const int4& roi, size_t inputWidth, size_t inputHeight )
{
return launchCrop<float4>(input, output, roi, inputWidth, inputHeight);
}
//-----------------------------------------------------------------------------------
cudaError_t cudaCrop( void* input, void* output, const int4& roi, size_t inputWidth, size_t inputHeight, imageFormat format )
{
if( format == IMAGE_RGB8 || format == IMAGE_BGR8 )
return cudaCrop((uchar3*)input, (uchar3*)output, roi, inputWidth, inputHeight);
else if( format == IMAGE_RGBA8 || format == IMAGE_BGRA8 )
return cudaCrop((uchar4*)input, (uchar4*)output, roi, inputWidth, inputHeight);
else if( format == IMAGE_RGB32F || format == IMAGE_BGR32F )
return cudaCrop((float3*)input, (float3*)output, roi, inputWidth, inputHeight);
else if( format == IMAGE_RGBA32F || format == IMAGE_BGRA32F )
return cudaCrop((float4*)input, (float4*)output, roi, inputWidth, inputHeight);
else if( format == IMAGE_GRAY8 )
return cudaCrop((uint8_t*)input, (uint8_t*)output, roi, inputWidth, inputHeight);
else if( format == IMAGE_GRAY32F )
return cudaCrop((float*)input, (float*)output, roi, inputWidth, inputHeight);
LogError(LOG_CUDA "cudaCrop() -- invalid image format '%s'\n", imageFormatToStr(format));
LogError(LOG_CUDA " supported formats are:\n");
LogError(LOG_CUDA " * gray8\n");
LogError(LOG_CUDA " * gray32f\n");
LogError(LOG_CUDA " * rgb8, bgr8\n");
LogError(LOG_CUDA " * rgba8, bgra8\n");
LogError(LOG_CUDA " * rgb32f, bgr32f\n");
LogError(LOG_CUDA " * rgba32f, bgra32f\n");
return cudaErrorInvalidValue;
}
|
9cfea13b6a3f6eea0f1745592dca36e37d10518a.hip | // !!! This is a file automatically generated by hipify!!!
/*
============================================================================
Name : LidarObstacleVisualizer.cpp
Author : Niklas Bergh
Version :
Copyright :
Description : Program identifies and displays obstacles based on Lidar data
============================================================================
*/
#include <GL/freeglut.h>
#include <stdio.h>
#include <unistd.h>
#include "Headers/StructDefinitions.h"
#include "Headers/LidarUDPReceiver.h"
#include "Headers/CudaFunctions.h"
#include "Headers/CudaErrorCheckFunctions.h"
#define LIDAR_UDP_PORT 2368 // UDP listening port
#define FRAMERATE 60 // The framerate for the openGL graphics
#define MAX_OBSTACLE_DETECTION_DISTANCE 20 // The maximal distance (in meters) that an obstacle can be identified in
#define OBSTACLE_POINT_SIDE_LENGTH 0.05 // The length of one side in the square that represents an obstacle point (decreasing this value by a factor of X will require X times more device memory)
#define MIN_OBSTACLE_DELTA_Z 0.1 // The difference in z coordinates (in meters) required for two points with the same x and y coordinates to be registered as an obstacle
#define MAX_NUMBER_OF_OBSTACLES 5000 //The maximal number of obstacle points that can be displayed
MemoryPointers* memoryPointers;
CameraPosition* cameraPosition;
KeysAndMouseState* keysAndMouseState;
void allocateMemory() {
memoryPointers = new MemoryPointers;
memset(memoryPointers,0,sizeof(MemoryPointers));
cameraPosition = new CameraPosition;
memset(cameraPosition,0,sizeof(CameraPosition));
keysAndMouseState = new KeysAndMouseState;
memset(keysAndMouseState,0,sizeof(KeysAndMouseState));
cameraPosition->z=5; // Start with the camera at z=5;
// First is rawLidarData. It is a byte array of raw UDP data (minus UDP headers and factory bytes), representing 75 UDP packets (one revolution of the lidar sensor). Size is 1200*75 = 90000 bytes
memoryPointers->sizeOfRawLidarData = 90000;
CUDA_CHECK_RETURN(hipHostMalloc((void**)&memoryPointers->rawLidarData,memoryPointers->sizeOfRawLidarData));
CUDA_CHECK_RETURN(hipMalloc((void**)&memoryPointers->rawLidarDataOnDevice,memoryPointers->sizeOfRawLidarData));
// Second is the locationLidarData. This is an array of OpenGlvertex structs, that contain values for x,y,z for 28800 points (one revolution)
memoryPointers->sizeOfLidarPoints = 28800*sizeof(OpenGLvertex);
CUDA_CHECK_RETURN(hipHostMalloc((void**)&memoryPointers->lidarPoints,memoryPointers->sizeOfLidarPoints));
CUDA_CHECK_RETURN(hipMalloc((void**)&memoryPointers->lidarPointsOnDevice,memoryPointers->sizeOfLidarPoints));
// Third is the data that hold the obstacles. Each obstacle is represented as a square (4 vertices) in the graphics
// and 6 bytes of index data needed by glDrawElements
memoryPointers->sizeOfObstacleSquares = MAX_NUMBER_OF_OBSTACLES*4*sizeof(OpenGLvertex);
CUDA_CHECK_RETURN(hipHostMalloc((void**)&memoryPointers->obstacleSquares,memoryPointers->sizeOfObstacleSquares));
CUDA_CHECK_RETURN(hipMalloc((void**)&memoryPointers->obstacleSquaresOnDevice,memoryPointers->sizeOfObstacleSquares));
memoryPointers->sizeOfObstacleSquareIndexesArray = MAX_NUMBER_OF_OBSTACLES*6*sizeof(GLuint);
CUDA_CHECK_RETURN(hipHostMalloc((void**)&memoryPointers->obstacleSquareIndexesArray,memoryPointers->sizeOfObstacleSquareIndexesArray));
// Finally allocate the two obstacle matrices on the device
memoryPointers->numberOfMatrixFieldsPerSide = 2 * MAX_OBSTACLE_DETECTION_DISTANCE / OBSTACLE_POINT_SIDE_LENGTH; // As an integer
memoryPointers->sizeOfObstacleMatrix = memoryPointers->numberOfMatrixFieldsPerSide * memoryPointers->numberOfMatrixFieldsPerSide*sizeof(int);
CUDA_CHECK_RETURN(hipMalloc((void**)&memoryPointers->obstacleMatrixForMaxZOnDevice,memoryPointers->sizeOfObstacleMatrix));
CUDA_CHECK_RETURN(hipMalloc((void**)&memoryPointers->obstacleMatrixForMinZOnDevice,memoryPointers->sizeOfObstacleMatrix));
// Now zero out obstacleSquaresOnDevice and intialize obstacleSquareIndexes:
CUDA_CHECK_RETURN(hipMemset(memoryPointers->obstacleSquaresOnDevice,0,memoryPointers->sizeOfObstacleSquares));
intializeObstacleSquareIndexesArray(memoryPointers,MAX_NUMBER_OF_OBSTACLES);
}
void freeMemory() {
CUDA_CHECK_RETURN(hipHostFree((void*)memoryPointers->rawLidarData));
CUDA_CHECK_RETURN(hipFree(memoryPointers->rawLidarDataOnDevice));
CUDA_CHECK_RETURN(hipHostFree((void*)memoryPointers->lidarPoints));
CUDA_CHECK_RETURN(hipFree(memoryPointers->lidarPointsOnDevice));
CUDA_CHECK_RETURN(hipHostFree((void*)memoryPointers->obstacleSquares));
CUDA_CHECK_RETURN(hipFree(memoryPointers->obstacleSquaresOnDevice));
CUDA_CHECK_RETURN(hipHostFree((void*)memoryPointers->obstacleSquareIndexesArray));
CUDA_CHECK_RETURN(hipFree(memoryPointers->obstacleMatrixForMaxZOnDevice));
CUDA_CHECK_RETURN(hipFree(memoryPointers->obstacleMatrixForMinZOnDevice));
free(memoryPointers);
free(cameraPosition);
free(keysAndMouseState);
}
void moveCameraStep(float stepLength, int direction) {
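// direction: 0 = forward, 1 = backward, 2 = strafe left, 3 = strafe right, 4 = strafe up, 5 = strafe down (relative to the current yaw/roll)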
if (direction==0) {
// Forwards
cameraPosition->x -= stepLength*cos((cameraPosition->roll-90)*2*M_PI/360)*sin(cameraPosition->yaw*2*M_PI/360);
cameraPosition->y += stepLength*cos((cameraPosition->roll-90)*2*M_PI/360)*cos(cameraPosition->yaw*2*M_PI/360);
cameraPosition->z += stepLength*sin((cameraPosition->roll-90)*2*M_PI/360);
}
else if(direction==1) {
//Backwards
cameraPosition->x += stepLength*cos((cameraPosition->roll-90)*2*M_PI/360)*sin(cameraPosition->yaw*2*M_PI/360);
cameraPosition->y -= stepLength*cos((cameraPosition->roll-90)*2*M_PI/360)*cos(cameraPosition->yaw*2*M_PI/360);
cameraPosition->z -= stepLength*sin((cameraPosition->roll-90)*2*M_PI/360);
}
else if(direction==2) {
//Strafe left
cameraPosition->x -= (2*M_PI/360)*sin((cameraPosition->yaw+90)*2*M_PI/360);
cameraPosition->y += (2*M_PI/360)*cos((cameraPosition->yaw+90)*2*M_PI/360);
}
else if(direction==3) {
//Strafe right
cameraPosition->x += (2*M_PI/360)*sin((cameraPosition->yaw+90)*2*M_PI/360);
cameraPosition->y -= (2*M_PI/360)*cos((cameraPosition->yaw+90)*2*M_PI/360);
}
else if(direction==4) {
// Strafe up
cameraPosition->x += stepLength*cos((cameraPosition->roll-180)*2*M_PI/360)*sin(cameraPosition->yaw*2*M_PI/360);
cameraPosition->y -= stepLength*cos((cameraPosition->roll-180)*2*M_PI/360)*cos(cameraPosition->yaw*2*M_PI/360);
cameraPosition->z -= stepLength*sin((cameraPosition->roll-180)*2*M_PI/360);
}
else if(direction==5) {
// Strafe down
cameraPosition->x -= stepLength*cos((cameraPosition->roll-180)*2*M_PI/360)*sin(cameraPosition->yaw*2*M_PI/360);
cameraPosition->y += stepLength*cos((cameraPosition->roll-180)*2*M_PI/360)*cos(cameraPosition->yaw*2*M_PI/360);
cameraPosition->z += stepLength*sin((cameraPosition->roll-180)*2*M_PI/360);
}
}
void updateCameraPositionAccordingToKeys(float timeSinceLastCall) {
float stepLength = 2.0f*timeSinceLastCall;
if (keysAndMouseState->forwardKeyIsPressed) {moveCameraStep(stepLength,0);}
else if (keysAndMouseState->backwardKeyIsPressed) {moveCameraStep(stepLength,1);}
else if (keysAndMouseState->leftStrafeKeyIsPressed) {moveCameraStep((stepLength/8.0),2);}
else if (keysAndMouseState->rightStrafeKeyIsPressed) {moveCameraStep((stepLength/8.0),3);}
else if (keysAndMouseState->upStrafeKeyIsPressed) {moveCameraStep((stepLength/2.0),4);}
else if (keysAndMouseState->downStrafeKeyIsPressed) {moveCameraStep((stepLength/2.0),5);}
}
void handleKeyDown(unsigned char key, int x, int y) {
if (key=='q') {glutLeaveMainLoop();} // Exit the program
else if (key=='w') {keysAndMouseState->forwardKeyIsPressed=true;}
else if (key=='s') {keysAndMouseState->backwardKeyIsPressed=true;}
else if (key=='a') {keysAndMouseState->leftStrafeKeyIsPressed=true;}
else if (key=='d') {keysAndMouseState->rightStrafeKeyIsPressed=true;}
else if (key=='r') {keysAndMouseState->upStrafeKeyIsPressed=true;}
else if (key=='f') {keysAndMouseState->downStrafeKeyIsPressed=true;}
}
void handleKeyUp(unsigned char key, int x, int y) {
if (key=='w') {keysAndMouseState->forwardKeyIsPressed=false;}
else if (key=='s') {keysAndMouseState->backwardKeyIsPressed=false;}
else if (key=='a') {keysAndMouseState->leftStrafeKeyIsPressed=false;}
else if (key=='d') {keysAndMouseState->rightStrafeKeyIsPressed=false;}
else if (key=='r') {keysAndMouseState->upStrafeKeyIsPressed=false;}
else if (key=='f') {keysAndMouseState->downStrafeKeyIsPressed=false;}
}
void handleMouseMove(int x, int y) {
if (keysAndMouseState->leftButtonIsPressed) {
cameraPosition->yaw = cameraPosition->oldYaw + 0.2f*(keysAndMouseState->mousePosXwhenPressed-x);
cameraPosition->roll = cameraPosition->oldRoll + 0.2f*(keysAndMouseState->mousePosYwhenPressed-y);
}
}
void handleMouseClick(int button, int state, int x, int y) {
float scrollStepLength =0.1f;
if (button == GLUT_LEFT_BUTTON) {
if (state == GLUT_DOWN) {
// Left mouse button is pressed
keysAndMouseState->leftButtonIsPressed=true;
keysAndMouseState->mousePosXwhenPressed=x;
keysAndMouseState->mousePosYwhenPressed=y;
}
else {
// Left mouse button is released
keysAndMouseState->leftButtonIsPressed=false;
cameraPosition->oldRoll = cameraPosition->roll;
cameraPosition->oldYaw = cameraPosition->yaw;
}
}
else if (button == 3) {
// Scroll up / move camera forwards
moveCameraStep(scrollStepLength,0);
}
else if(button == 4) {
// Zoom out / move camera backwards
moveCameraStep(scrollStepLength,1);
}
}
void updateFrame(int data) {
glutTimerFunc(1000/FRAMERATE,updateFrame,0); // Call again in 1000/FRAMERATE milliseconds
//timeval curTime;
//gettimeofday(&curTime,NULL);
//printf("%d\n",curTime.tv_usec);
//printf("%s%f%s%f\n","Pitch: ",cameraPosition->pitch,", yaw: ",cameraPosition->yaw);
//printf("%s%f%s%f%s%f\n","X: ",cameraPosition->x,", y: ",cameraPosition->y,", z: ",cameraPosition->z);
// Do the CUDA calculation on the lidar data:
translateLidarDataFromRawToXYZ(memoryPointers);
identifyObstaclesInLidarData(memoryPointers,OBSTACLE_POINT_SIDE_LENGTH,MIN_OBSTACLE_DELTA_Z,MAX_NUMBER_OF_OBSTACLES);
float timeSinceLastCall=0.01;
updateCameraPositionAccordingToKeys(timeSinceLastCall);
glutPostRedisplay();
}
void drawDisplay(void) {
// This function is called by openGL when it decides that the window needs to be redrawn
// Load the modelview matrix and change it according to the position of the camera
glMatrixMode(GL_MODELVIEW);glLoadIdentity();
// Change modelview according to the camera position (inverted because when we want to move the camera somewhere, we move the model in the other direction)
glRotatef(-cameraPosition->roll, 1,0,0);
glRotatef(-cameraPosition->yaw, 0,0,1);
glTranslatef(-cameraPosition->x, -cameraPosition->y, -cameraPosition->z);
// Clear Color and Depth Buffers and enable vertex drawing
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnableClientState( GL_VERTEX_ARRAY );
// Draw the lidar points:
glColor3f(1.0f,1.0f,1.0f); // Set to white
glPointSize(2.0);
glVertexPointer(3, GL_FLOAT, sizeof(OpenGLvertex), &memoryPointers->lidarPoints->x);
glDrawArrays( GL_POINTS, 0, 28800 ); // Draws all the points from the LIDAR
// Draw the obstacle squares:
glColor3f(1.0f,0.0f,0.0f); // Set the color of all the obstacle squares to red
glVertexPointer(3,GL_FLOAT,sizeof(OpenGLvertex),&memoryPointers->obstacleSquares->x);
glDrawElements(GL_TRIANGLES,6*memoryPointers->currentNrOfObstacles,GL_UNSIGNED_INT,memoryPointers->obstacleSquareIndexesArray);
glDisableClientState( GL_VERTEX_ARRAY );
//glFlush();
glutSwapBuffers();
}
void setUpDisplay() {
// Load the Projection Matrix
glMatrixMode(GL_PROJECTION);glLoadIdentity();
// Set the viewport to be the entire window
glViewport(0, 0, glutGet(GLUT_SCREEN_WIDTH), glutGet(GLUT_SCREEN_HEIGHT));
// Set the correct perspective.
gluPerspective(45.0f, glutGet(GLUT_SCREEN_WIDTH)/(double) glutGet(GLUT_SCREEN_HEIGHT), 0.1f, 100.0f);
}
int main(int argc, char** argv)
{
// Allocate all the memory the program will need
allocateMemory();
// Set up the UDP connection with the lidar sensor:
LidarUDPReceiver lidarUDPReceiver(LIDAR_UDP_PORT);
pthread_t udpReceiverThreadID = lidarUDPReceiver.startReceiverThread(memoryPointers->rawLidarData);
// Init glut and create window
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DEPTH | GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowPosition(0,0);
glutInitWindowSize(1000,1000);
glutCreateWindow("Lidar 3D visualization");
glutFullScreen();
// Set up the openGL projection matrix
setUpDisplay();
// Register callbacks
glutDisplayFunc(drawDisplay);
glutKeyboardFunc(handleKeyDown);
glutKeyboardUpFunc(handleKeyUp);
glutMouseFunc(handleMouseClick);
glutMotionFunc(handleMouseMove);
glutTimerFunc(0,updateFrame,0); // The frame update function (all the work is carried out here)
// Set glut and opengl options:
glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE,GLUT_ACTION_GLUTMAINLOOP_RETURNS);
glEnable(GL_DEPTH_TEST);
// Enter GLUT event processing cycle
glutMainLoop();
// Exit the UDP receiver thread
lidarUDPReceiver.setThreadExitFlag();
pthread_join(udpReceiverThreadID,NULL);
// Free allocated data
freeMemory();
printf("Main exited\n");
return 0;
}
| 9cfea13b6a3f6eea0f1745592dca36e37d10518a.cu | /*
============================================================================
Name : LidarObstacleVisualizer.cpp
Author : Niklas Bergh
Version :
Copyright :
Description : Program identifies and displays obstacles based on Lidar data
============================================================================
*/
#include <GL/freeglut.h>
#include <stdio.h>
#include <unistd.h>
#include "Headers/StructDefinitions.h"
#include "Headers/LidarUDPReceiver.h"
#include "Headers/CudaFunctions.h"
#include "Headers/CudaErrorCheckFunctions.h"
#define LIDAR_UDP_PORT 2368 // UDP listening port
#define FRAMERATE 60 // The framerate for the openGL graphics
#define MAX_OBSTACLE_DETECTION_DISTANCE 20 // The maximal distance (in meters) that an obstacle can be identified in
#define OBSTACLE_POINT_SIDE_LENGTH 0.05 // The length of one side in the square that represents an obstacle point (decreasing this value by a factor of X will require X² times more device memory)
#define MIN_OBSTACLE_DELTA_Z 0.1 // The difference in z coordinates (in meters) required for two points with the same x and y coordinates to be registered as an obstacle
#define MAX_NUMBER_OF_OBSTACLES 5000 //The maximal number of obstacle points that can be displayed
MemoryPointers* memoryPointers;
CameraPosition* cameraPosition;
KeysAndMouseState* keysAndMouseState;
void allocateMemory() {
memoryPointers = new MemoryPointers;
memset(memoryPointers,0,sizeof(MemoryPointers));
cameraPosition = new CameraPosition;
memset(cameraPosition,0,sizeof(CameraPosition));
keysAndMouseState = new KeysAndMouseState;
memset(keysAndMouseState,0,sizeof(KeysAndMouseState));
cameraPosition->z=5; // Start with the camera at z=5;
// First is rawLidarData. It is a byte array of raw UDP data (minus UDP headers and factory bytes), representing 75 UDP packets (one revolution of the lidar sensor). Size is 1200*75 = 90000 bytes
memoryPointers->sizeOfRawLidarData = 90000;
CUDA_CHECK_RETURN(cudaMallocHost((void**)&memoryPointers->rawLidarData,memoryPointers->sizeOfRawLidarData));
CUDA_CHECK_RETURN(cudaMalloc((void**)&memoryPointers->rawLidarDataOnDevice,memoryPointers->sizeOfRawLidarData));
// Second is the locationLidarData. This is an array of OpenGlvertex structs, that contain values for x,y,z for 28800 points (one revolution)
memoryPointers->sizeOfLidarPoints = 28800*sizeof(OpenGLvertex);
CUDA_CHECK_RETURN(cudaMallocHost((void**)&memoryPointers->lidarPoints,memoryPointers->sizeOfLidarPoints));
CUDA_CHECK_RETURN(cudaMalloc((void**)&memoryPointers->lidarPointsOnDevice,memoryPointers->sizeOfLidarPoints));
// Third is the data that hold the obstacles. Each obstacle is represented as a square (4 vertices) in the graphics
// and 6 bytes of index data needed by glDrawElements
memoryPointers->sizeOfObstacleSquares = MAX_NUMBER_OF_OBSTACLES*4*sizeof(OpenGLvertex);
CUDA_CHECK_RETURN(cudaMallocHost((void**)&memoryPointers->obstacleSquares,memoryPointers->sizeOfObstacleSquares));
CUDA_CHECK_RETURN(cudaMalloc((void**)&memoryPointers->obstacleSquaresOnDevice,memoryPointers->sizeOfObstacleSquares));
memoryPointers->sizeOfObstacleSquareIndexesArray = MAX_NUMBER_OF_OBSTACLES*6*sizeof(GLuint);
CUDA_CHECK_RETURN(cudaMallocHost((void**)&memoryPointers->obstacleSquareIndexesArray,memoryPointers->sizeOfObstacleSquareIndexesArray));
// Finally allocate the two obstacle matrices on the device
memoryPointers->numberOfMatrixFieldsPerSide = 2 * MAX_OBSTACLE_DETECTION_DISTANCE / OBSTACLE_POINT_SIDE_LENGTH; // As an integer
memoryPointers->sizeOfObstacleMatrix = memoryPointers->numberOfMatrixFieldsPerSide * memoryPointers->numberOfMatrixFieldsPerSide*sizeof(int);
CUDA_CHECK_RETURN(cudaMalloc((void**)&memoryPointers->obstacleMatrixForMaxZOnDevice,memoryPointers->sizeOfObstacleMatrix));
CUDA_CHECK_RETURN(cudaMalloc((void**)&memoryPointers->obstacleMatrixForMinZOnDevice,memoryPointers->sizeOfObstacleMatrix));
// Now zero out obstacleSquaresOnDevice and intialize obstacleSquareIndexes:
CUDA_CHECK_RETURN(cudaMemset(memoryPointers->obstacleSquaresOnDevice,0,memoryPointers->sizeOfObstacleSquares));
intializeObstacleSquareIndexesArray(memoryPointers,MAX_NUMBER_OF_OBSTACLES);
}
void freeMemory() {
CUDA_CHECK_RETURN(cudaFreeHost((void*)memoryPointers->rawLidarData));
CUDA_CHECK_RETURN(cudaFree(memoryPointers->rawLidarDataOnDevice));
CUDA_CHECK_RETURN(cudaFreeHost((void*)memoryPointers->lidarPoints));
CUDA_CHECK_RETURN(cudaFree(memoryPointers->lidarPointsOnDevice));
CUDA_CHECK_RETURN(cudaFreeHost((void*)memoryPointers->obstacleSquares));
CUDA_CHECK_RETURN(cudaFree(memoryPointers->obstacleSquaresOnDevice));
CUDA_CHECK_RETURN(cudaFreeHost((void*)memoryPointers->obstacleSquareIndexesArray));
CUDA_CHECK_RETURN(cudaFree(memoryPointers->obstacleMatrixForMaxZOnDevice));
CUDA_CHECK_RETURN(cudaFree(memoryPointers->obstacleMatrixForMinZOnDevice));
free(memoryPointers);
free(cameraPosition);
free(keysAndMouseState);
}
void moveCameraStep(float stepLength, int direction) {
if (direction==0) {
// Forwards
cameraPosition->x -= stepLength*cos((cameraPosition->roll-90)*2*M_PI/360)*sin(cameraPosition->yaw*2*M_PI/360);
cameraPosition->y += stepLength*cos((cameraPosition->roll-90)*2*M_PI/360)*cos(cameraPosition->yaw*2*M_PI/360);
cameraPosition->z += stepLength*sin((cameraPosition->roll-90)*2*M_PI/360);
}
else if(direction==1) {
//Backwards
cameraPosition->x += stepLength*cos((cameraPosition->roll-90)*2*M_PI/360)*sin(cameraPosition->yaw*2*M_PI/360);
cameraPosition->y -= stepLength*cos((cameraPosition->roll-90)*2*M_PI/360)*cos(cameraPosition->yaw*2*M_PI/360);
cameraPosition->z -= stepLength*sin((cameraPosition->roll-90)*2*M_PI/360);
}
else if(direction==2) {
//Strafe left
cameraPosition->x -= (2*M_PI/360)*sin((cameraPosition->yaw+90)*2*M_PI/360);
cameraPosition->y += (2*M_PI/360)*cos((cameraPosition->yaw+90)*2*M_PI/360);
}
else if(direction==3) {
//Strafe right
cameraPosition->x += (2*M_PI/360)*sin((cameraPosition->yaw+90)*2*M_PI/360);
cameraPosition->y -= (2*M_PI/360)*cos((cameraPosition->yaw+90)*2*M_PI/360);
}
else if(direction==4) {
// Strafe up
cameraPosition->x += stepLength*cos((cameraPosition->roll-180)*2*M_PI/360)*sin(cameraPosition->yaw*2*M_PI/360);
cameraPosition->y -= stepLength*cos((cameraPosition->roll-180)*2*M_PI/360)*cos(cameraPosition->yaw*2*M_PI/360);
cameraPosition->z -= stepLength*sin((cameraPosition->roll-180)*2*M_PI/360);
}
else if(direction==5) {
// Strafe down
cameraPosition->x -= stepLength*cos((cameraPosition->roll-180)*2*M_PI/360)*sin(cameraPosition->yaw*2*M_PI/360);
cameraPosition->y += stepLength*cos((cameraPosition->roll-180)*2*M_PI/360)*cos(cameraPosition->yaw*2*M_PI/360);
cameraPosition->z += stepLength*sin((cameraPosition->roll-180)*2*M_PI/360);
}
}
void updateCameraPositionAccordingToKeys(float timeSinceLastCall) {
float stepLength = 2.0f*timeSinceLastCall;
if (keysAndMouseState->forwardKeyIsPressed) {moveCameraStep(stepLength,0);}
else if (keysAndMouseState->backwardKeyIsPressed) {moveCameraStep(stepLength,1);}
else if (keysAndMouseState->leftStrafeKeyIsPressed) {moveCameraStep((stepLength/8.0),2);}
else if (keysAndMouseState->rightStrafeKeyIsPressed) {moveCameraStep((stepLength/8.0),3);}
else if (keysAndMouseState->upStrafeKeyIsPressed) {moveCameraStep((stepLength/2.0),4);}
else if (keysAndMouseState->downStrafeKeyIsPressed) {moveCameraStep((stepLength/2.0),5);}
}
void handleKeyDown(unsigned char key, int x, int y) {
if (key=='q') {glutLeaveMainLoop();} // Exit the program
else if (key=='w') {keysAndMouseState->forwardKeyIsPressed=true;}
else if (key=='s') {keysAndMouseState->backwardKeyIsPressed=true;}
else if (key=='a') {keysAndMouseState->leftStrafeKeyIsPressed=true;}
else if (key=='d') {keysAndMouseState->rightStrafeKeyIsPressed=true;}
else if (key=='r') {keysAndMouseState->upStrafeKeyIsPressed=true;}
else if (key=='f') {keysAndMouseState->downStrafeKeyIsPressed=true;}
}
void handleKeyUp(unsigned char key, int x, int y) {
if (key=='w') {keysAndMouseState->forwardKeyIsPressed=false;}
else if (key=='s') {keysAndMouseState->backwardKeyIsPressed=false;}
else if (key=='a') {keysAndMouseState->leftStrafeKeyIsPressed=false;}
else if (key=='d') {keysAndMouseState->rightStrafeKeyIsPressed=false;}
else if (key=='r') {keysAndMouseState->upStrafeKeyIsPressed=false;}
else if (key=='f') {keysAndMouseState->downStrafeKeyIsPressed=false;}
}
void handleMouseMove(int x, int y) {
if (keysAndMouseState->leftButtonIsPressed) {
cameraPosition->yaw = cameraPosition->oldYaw + 0.2f*(keysAndMouseState->mousePosXwhenPressed-x);
cameraPosition->roll = cameraPosition->oldRoll + 0.2f*(keysAndMouseState->mousePosYwhenPressed-y);
}
}
void handleMouseClick(int button, int state, int x, int y) {
float scrollStepLength =0.1f;
if (button == GLUT_LEFT_BUTTON) {
if (state == GLUT_DOWN) {
// Left mouse button is pressed
keysAndMouseState->leftButtonIsPressed=true;
keysAndMouseState->mousePosXwhenPressed=x;
keysAndMouseState->mousePosYwhenPressed=y;
}
else {
// Left mouse button is released
keysAndMouseState->leftButtonIsPressed=false;
cameraPosition->oldRoll = cameraPosition->roll;
cameraPosition->oldYaw = cameraPosition->yaw;
}
}
else if (button == 3) {
// Scroll up / move camera forwards
moveCameraStep(scrollStepLength,0);
}
else if(button == 4) {
// Zoom out / move camera backwards
moveCameraStep(scrollStepLength,1);
}
}
void updateFrame(int data) {
glutTimerFunc(1000/FRAMERATE,updateFrame,0); // Call again in 1000/FRAMERATE milliseconds
//timeval curTime;
//gettimeofday(&curTime,NULL);
//printf("%d\n",curTime.tv_usec);
//printf("%s%f%s%f\n","Pitch: ",cameraPosition->pitch,", yaw: ",cameraPosition->yaw);
//printf("%s%f%s%f%s%f\n","X: ",cameraPosition->x,", y: ",cameraPosition->y,", z: ",cameraPosition->z);
// Do the CUDA calculation on the lidar data:
translateLidarDataFromRawToXYZ(memoryPointers);
identifyObstaclesInLidarData(memoryPointers,OBSTACLE_POINT_SIDE_LENGTH,MIN_OBSTACLE_DELTA_Z,MAX_NUMBER_OF_OBSTACLES);
float timeSinceLastCall=0.01;
updateCameraPositionAccordingToKeys(timeSinceLastCall);
glutPostRedisplay();
}
void drawDisplay(void) {
// This function is called by openGL when it decides that the window needs to be redrawn
// Load the modelview matrix and change it according to the position of the camera
glMatrixMode(GL_MODELVIEW);glLoadIdentity();
// Change modelview according to the camera position (inverted because when we want to move the camera somewhere, we move the model in the other direction)
glRotatef(-cameraPosition->roll, 1,0,0);
glRotatef(-cameraPosition->yaw, 0,0,1);
glTranslatef(-cameraPosition->x, -cameraPosition->y, -cameraPosition->z);
// Clear Color and Depth Buffers and enable vertex drawing
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnableClientState( GL_VERTEX_ARRAY );
// Draw the lidar points:
glColor3f(1.0f,1.0f,1.0f); // Set to white
glPointSize(2.0);
glVertexPointer(3, GL_FLOAT, sizeof(OpenGLvertex), &memoryPointers->lidarPoints->x);
glDrawArrays( GL_POINTS, 0, 28800 ); // Draws all the points from the LIDAR
// Draw the obstacle squares:
glColor3f(1.0f,0.0f,0.0f); // Set the color of all the obstacle squares to red
glVertexPointer(3,GL_FLOAT,sizeof(OpenGLvertex),&memoryPointers->obstacleSquares->x);
glDrawElements(GL_TRIANGLES,6*memoryPointers->currentNrOfObstacles,GL_UNSIGNED_INT,memoryPointers->obstacleSquareIndexesArray);
glDisableClientState( GL_VERTEX_ARRAY );
//glFlush();
glutSwapBuffers();
}
void setUpDisplay() {
// Load the Projection Matrix
glMatrixMode(GL_PROJECTION);glLoadIdentity();
// Set the viewport to be the entire window
glViewport(0, 0, glutGet(GLUT_SCREEN_WIDTH), glutGet(GLUT_SCREEN_HEIGHT));
// Set the correct perspective.
gluPerspective(45.0f, glutGet(GLUT_SCREEN_WIDTH)/(double) glutGet(GLUT_SCREEN_HEIGHT), 0.1f, 100.0f);
}
int main(int argc, char** argv)
{
// Allocate all the memory the program will need
allocateMemory();
// Set up the UDP connection with the lidar sensor:
LidarUDPReceiver lidarUDPReceiver(LIDAR_UDP_PORT);
pthread_t udpReceiverThreadID = lidarUDPReceiver.startReceiverThread(memoryPointers->rawLidarData);
// Init glut and create window
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DEPTH | GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowPosition(0,0);
glutInitWindowSize(1000,1000);
glutCreateWindow("Lidar 3D visualization");
glutFullScreen();
// Set up the openGL projection matrix
setUpDisplay();
// Register callbacks
glutDisplayFunc(drawDisplay);
glutKeyboardFunc(handleKeyDown);
glutKeyboardUpFunc(handleKeyUp);
glutMouseFunc(handleMouseClick);
glutMotionFunc(handleMouseMove);
glutTimerFunc(0,updateFrame,0); // The frame update function (all the work is carried out here)
// Set glut and opengl options:
glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE,GLUT_ACTION_GLUTMAINLOOP_RETURNS);
glEnable(GL_DEPTH_TEST);
// Enter GLUT event processing cycle
glutMainLoop();
// Exit the UDP receiver thread
lidarUDPReceiver.setThreadExitFlag();
pthread_join(udpReceiverThreadID,NULL);
// Free allocated data
freeMemory();
printf("Main exited\n");
return 0;
}
|
52751b02369970687a967f2f53ee34148bbad30c.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include "assert.h"
#include "matx.h"
#include "test_types.h"
#include "utilities.h"
#include "gtest/gtest.h"
using namespace matx;
template <typename TensorType> struct ViewTestsData {
tensor_t<TensorType, 0> t0{};
tensor_t<TensorType, 1> t1{{10}};
tensor_t<TensorType, 2> t2{{20, 10}};
tensor_t<TensorType, 3> t3{{30, 20, 10}};
tensor_t<TensorType, 4> t4{{40, 30, 20, 10}};
tensor_t<TensorType, 2> t2s = t2.Permute({1, 0});
tensor_t<TensorType, 3> t3s = t3.Permute({2, 1, 0});
tensor_t<TensorType, 4> t4s = t4.Permute({3, 2, 1, 0});
};
template <typename TensorType>
class ViewTestsComplex : public ::testing::Test,
public ViewTestsData<TensorType> {
};
template <typename TensorType>
class ViewTestsFloat : public ::testing::Test,
public ViewTestsData<TensorType> {
};
template <typename TensorType>
class ViewTestsFloatNonComplex
: public ::testing::Test,
public ViewTestsData<TensorType> {
};
template <typename TensorType>
class ViewTestsNumeric : public ::testing::Test,
public ViewTestsData<TensorType> {
};
template <typename TensorType>
class ViewTestsNumericNonComplex
: public ::testing::Test,
public ViewTestsData<TensorType> {
};
template <typename TensorType>
class ViewTestsIntegral : public ::testing::Test,
public ViewTestsData<TensorType> {
};
template <typename TensorType>
class ViewTestsBoolean : public ::testing::Test,
public ViewTestsData<TensorType> {
};
template <typename TensorType>
class ViewTestsAll : public ::testing::Test,
public ViewTestsData<TensorType> {
};
TYPED_TEST_SUITE(ViewTestsAll, MatXAllTypes);
TYPED_TEST_SUITE(ViewTestsComplex, MatXComplexTypes);
TYPED_TEST_SUITE(ViewTestsFloat, MatXFloatTypes);
TYPED_TEST_SUITE(ViewTestsFloatNonComplex, MatXFloatNonComplexTypes);
TYPED_TEST_SUITE(ViewTestsNumeric, MatXNumericTypes);
TYPED_TEST_SUITE(ViewTestsIntegral, MatXAllIntegralTypes);
TYPED_TEST_SUITE(ViewTestsNumericNonComplex, MatXNumericNonComplexTypes);
TYPED_TEST_SUITE(ViewTestsBoolean, MatXBoolTypes);
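// OverlapView({w}, {s}) on a 1D tensor yields a 2D sliding-window view: each row is a
// window of length w, and consecutive windows start s elements apart.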
TYPED_TEST(ViewTestsNumericNonComplex, OverlapView)
{
MATX_ENTER_HANDLER();
tensor_t<TypeParam, 1> a{{10}};
a.SetVals({0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
auto ao = a.OverlapView({4}, {2});
tensor_t<TypeParam, 2> b{{4, 4}};
b.SetVals({{0, 1, 2, 3}, {2, 3, 4, 5}, {4, 5, 6, 7}, {6, 7, 8, 9}});
for (index_t i = 0; i < b.Size(0); i++) {
for (index_t j = 0; j < b.Size(1); j++) {
ASSERT_EQ(ao(i, j), b(i, j));
}
}
auto ao2 = a.OverlapView({4}, {1});
tensor_t<TypeParam, 2> b2{{7, 4}};
b2.SetVals({{0, 1, 2, 3},
{1, 2, 3, 4},
{2, 3, 4, 5},
{3, 4, 5, 6},
{4, 5, 6, 7},
{5, 6, 7, 8},
{6, 7, 8, 9}});
for (index_t i = 0; i < b2.Size(0); i++) {
for (index_t j = 0; j < b2.Size(1); j++) {
ASSERT_EQ(ao2(i, j), b2(i, j));
}
}
auto ao3 = a.OverlapView({4}, {3});
tensor_t<TypeParam, 2> b3{{3, 4}};
b3.SetVals({{0, 1, 2, 3}, {3, 4, 5, 6}, {6, 7, 8, 9}});
for (index_t i = 0; i < b3.Size(0); i++) {
for (index_t j = 0; j < b3.Size(1); j++) {
ASSERT_EQ(ao3(i, j), b3(i, j));
}
}
auto ao4 = a.OverlapView({3}, {2});
tensor_t<TypeParam, 2> b4{{4, 3}};
b4.SetVals({{0, 1, 2}, {2, 3, 4}, {4, 5, 6}, {6, 7, 8}});
for (index_t i = 0; i < b4.Size(0); i++) {
for (index_t j = 0; j < b4.Size(1); j++) {
ASSERT_EQ(ao4(i, j), b4(i, j));
}
}
MATX_EXIT_HANDLER();
}
TYPED_TEST(ViewTestsAll, Stride)
{
MATX_ENTER_HANDLER();
ASSERT_EQ(this->t1.Stride(0), 1);
ASSERT_EQ(this->t2.Stride(1), 1);
ASSERT_EQ(this->t3.Stride(2), 1);
ASSERT_EQ(this->t4.Stride(3), 1);
ASSERT_EQ(this->t2.Stride(0), this->t2.Size(1));
ASSERT_EQ(this->t3.Stride(1), this->t3.Size(2));
ASSERT_EQ(this->t4.Stride(2), this->t4.Size(3));
ASSERT_EQ(this->t3.Stride(0), this->t3.Size(2) * this->t3.Size(1));
ASSERT_EQ(this->t4.Stride(1), this->t4.Size(3) * this->t4.Size(2));
ASSERT_EQ(this->t4.Stride(0),
this->t4.Size(3) * this->t4.Size(2) * this->t4.Size(1));
MATX_EXIT_HANDLER();
}
TYPED_TEST(ViewTestsIntegral, SliceStride)
{
MATX_ENTER_HANDLER();
this->t1.SetVals({10, 20, 30, 40, 50, 60, 70, 80, 90, 100});
auto t1t = this->t1.Slice({0}, {matxEnd}, {2});
for (index_t i = 0; i < this->t1.Size(0); i += 2) {
ASSERT_EQ(this->t1(i), t1t(i / 2));
}
auto t1t2 = this->t1.Slice({2}, {matxEnd}, {2});
for (index_t i = 0; i < t1t2.Size(0); i++) {
ASSERT_EQ(30 + 20 * i, t1t2(i));
}
MATX_EXIT_HANDLER();
}
TYPED_TEST(ViewTestsIntegral, Slice)
{
MATX_ENTER_HANDLER();
auto t2t = this->t2.Slice({1, 2}, {3, 5});
auto t3t = this->t3.Slice({1, 2, 3}, {3, 5, 7});
auto t4t = this->t4.Slice({1, 2, 3, 4}, {3, 5, 7, 9});
// Negative slice test
try {
auto t2e = this->t2.Slice({1, 2}, {1, 2});
ASSERT_EQ(true, false);
}
catch (...) {
ASSERT_EQ(true, true);
}
ASSERT_EQ(t2t.Size(0), 2);
ASSERT_EQ(t2t.Size(1), 3);
ASSERT_EQ(t3t.Size(0), 2);
ASSERT_EQ(t3t.Size(1), 3);
ASSERT_EQ(t3t.Size(2), 4);
ASSERT_EQ(t4t.Size(0), 2);
ASSERT_EQ(t4t.Size(1), 3);
ASSERT_EQ(t4t.Size(2), 4);
ASSERT_EQ(t4t.Size(3), 5);
for (index_t i = 0; i < t2t.Size(0); i++) {
for (index_t j = 0; j < t2t.Size(1); j++) {
ASSERT_EQ(t2t(i, j), this->t2(i + 1, j + 2));
}
}
for (index_t i = 0; i < t3t.Size(0); i++) {
for (index_t j = 0; j < t3t.Size(1); j++) {
for (index_t k = 0; k < t3t.Size(2); k++) {
ASSERT_EQ(t3t(i, j, k), this->t3(i + 1, j + 2, k + 3));
}
}
}
for (index_t i = 0; i < t4t.Size(0); i++) {
for (index_t j = 0; j < t4t.Size(1); j++) {
for (index_t k = 0; k < t4t.Size(2); k++) {
for (index_t l = 0; l < t4t.Size(3); l++) {
ASSERT_EQ(t4t(i, j, k, l), this->t4(i + 1, j + 2, k + 3, l + 4));
}
}
}
}
MATX_EXIT_HANDLER();
}
TYPED_TEST(ViewTestsAll, SliceAndReduce)
{
tensor_t<float, 2> t2t{{20, 10}};
tensor_t<float, 3> t3t{{30, 20, 10}};
MATX_ENTER_HANDLER();
{
index_t j = 0;
auto t2sly = t2t.Slice<1>({0, j}, {matxEnd, matxDropDim});
for (index_t i = 0; i < t2sly.Size(0); i++) {
ASSERT_EQ(t2sly(i), t2t(i, j));
}
}
{
index_t i = 0;
auto t2slx = t2t.Slice<1>({i, 0}, {matxDropDim, matxEnd});
for (index_t j = 0; j < t2slx.Size(0); j++) {
ASSERT_EQ(t2slx(j), t2t(i, j));
}
}
{
index_t j = 0;
index_t k = 0;
auto t3slz = t3t.Slice<1>({0, j, k}, {matxEnd, matxDropDim, matxDropDim});
for (index_t i = 0; i < t3slz.Size(0); i++) {
ASSERT_EQ(t3slz(i), t3t(i, j, k));
}
}
{
index_t i = 0;
index_t k = 0;
auto t3sly = t3t.Slice<1>({i, 0, k}, {matxDropDim, matxEnd, matxDropDim});
for (index_t j = 0; j < t3sly.Size(0); j++) {
ASSERT_EQ(t3sly(j), t3t(i, j, k));
}
}
{
index_t i = 0;
index_t j = 0;
auto t3slx = t3t.Slice<1>({i, j, 0}, {matxDropDim, matxDropDim, matxEnd});
for (index_t k = 0; k < t3slx.Size(0); k++) {
ASSERT_EQ(t3slx(k), t3t(i, j, k));
}
}
{
index_t k = 0;
auto t3slzy = t3t.Slice<2>({0, 0, k}, {matxEnd, matxEnd, matxDropDim});
for (index_t i = 0; i < t3slzy.Size(0); i++) {
for (index_t j = 0; j < t3slzy.Size(1); j++) {
ASSERT_EQ(t3slzy(i, j), t3t(i, j, k));
}
}
}
{
index_t j = 0;
auto t3slzx = t3t.Slice<2>({0, j, 0}, {matxEnd, matxDropDim, matxEnd});
for (index_t i = 0; i < t3slzx.Size(0); i++) {
for (index_t k = 0; k < t3slzx.Size(1); k++) {
ASSERT_EQ(t3slzx(i, k), t3t(i, j, k));
}
}
}
{
index_t i = 0;
auto t3slyx = t3t.Slice<2>({i, 0, 0}, {matxDropDim, matxEnd, matxEnd});
for (index_t j = 0; j < t3slyx.Size(0); j++) {
for (index_t k = 0; k < t3slyx.Size(1); k++) {
ASSERT_EQ(t3slyx(j, k), t3t(i, j, k));
}
}
}
MATX_EXIT_HANDLER();
}
TEST(BasicTensorTest, Clone)
{
tensor_t<float, 0> t0{};
tensor_t<float, 1> t1{{10}};
tensor_t<float, 2> t2{{20, 10}};
tensor_t<float, 3> t3{{30, 20, 10}};
MATX_ENTER_HANDLER();
// clone t0 across 0/1/2/3 dim
auto t0c1 = t0.Clone<1>({5});
ASSERT_EQ(t0c1.Size(0), 5);
for (index_t i = 0; i < t0c1.Size(0); i++) {
ASSERT_EQ(t0c1(i), t0());
}
auto t0c2 = t0.Clone<2>({5, 6});
ASSERT_EQ(t0c2.Size(0), 5);
ASSERT_EQ(t0c2.Size(1), 6);
for (index_t i = 0; i < t0c2.Size(0); i++) {
for (index_t j = 0; j < t0c2.Size(1); j++) {
ASSERT_EQ(t0c2(i, j), t0());
}
}
auto t0c3 = t0.Clone<3>({5, 6, 7});
ASSERT_EQ(t0c3.Size(0), 5);
ASSERT_EQ(t0c3.Size(1), 6);
ASSERT_EQ(t0c3.Size(2), 7);
for (index_t i = 0; i < t0c3.Size(0); i++) {
for (index_t j = 0; j < t0c3.Size(1); j++) {
for (index_t k = 0; k < t0c3.Size(2); k++) {
ASSERT_EQ(t0c3(i, j, k), t0());
}
}
}
auto t0c4 = t0.Clone<4>({5, 6, 7, 8});
ASSERT_EQ(t0c4.Size(0), 5);
ASSERT_EQ(t0c4.Size(1), 6);
ASSERT_EQ(t0c4.Size(2), 7);
ASSERT_EQ(t0c4.Size(3), 8);
for (index_t i = 0; i < t0c4.Size(0); i++) {
for (index_t j = 0; j < t0c4.Size(1); j++) {
for (index_t k = 0; k < t0c4.Size(2); k++) {
for (index_t l = 0; l < t0c4.Size(3); l++) {
ASSERT_EQ(t0c4(i, j, k, l), t0());
}
}
}
}
auto t1c1 = t1.Clone<2>({5, matxKeepDim});
ASSERT_EQ(t1c1.Size(0), 5);
for (index_t i = 0; i < t1c1.Size(0); i++) {
for (index_t j = 0; j < t1c1.Size(1); j++) {
ASSERT_EQ(t1c1(i, j), t1(j));
}
}
auto t1c2 = t1.Clone<3>({5, 6, matxKeepDim});
ASSERT_EQ(t1c2.Size(0), 5);
ASSERT_EQ(t1c2.Size(1), 6);
ASSERT_EQ(t1c2.Size(2), t1.Size(0));
for (index_t i = 0; i < t1c2.Size(0); i++) {
for (index_t j = 0; j < t1c2.Size(1); j++) {
for (index_t k = 0; k < t1c2.Size(2); k++) {
ASSERT_EQ(t1c2(i, j, k), t1(k));
}
}
}
auto t1c3 = t1.Clone<4>({5, 6, 7, matxKeepDim});
ASSERT_EQ(t1c3.Size(0), 5);
ASSERT_EQ(t1c3.Size(1), 6);
ASSERT_EQ(t1c3.Size(2), 7);
ASSERT_EQ(t1c3.Size(3), t1.Size(0));
for (index_t i = 0; i < t1c3.Size(0); i++) {
for (index_t j = 0; j < t1c3.Size(1); j++) {
for (index_t k = 0; k < t1c3.Size(2); k++) {
for (index_t l = 0; l < t1c3.Size(3); l++) {
ASSERT_EQ(t1c3(i, j, k, l), t1(l));
}
}
}
}
// clone t2 across 0/1 dim
auto t2c1 = t2.Clone<3>({5, matxKeepDim, matxKeepDim});
ASSERT_EQ(t2c1.Size(0), 5);
for (index_t i = 0; i < t2c1.Size(0); i++) {
for (index_t j = 0; j < t2c1.Size(1); j++) {
for (index_t k = 0; k < t2c1.Size(2); k++) {
ASSERT_EQ(t2c1(i, j, k), t2(j, k));
}
}
}
auto t2c2 = t2.Clone<4>({5, 6, matxKeepDim, matxKeepDim});
ASSERT_EQ(t2c2.Size(0), 5);
ASSERT_EQ(t2c2.Size(1), 6);
for (index_t i = 0; i < t2c2.Size(0); i++) {
for (index_t j = 0; j < t2c2.Size(1); j++) {
for (index_t k = 0; k < t2c2.Size(2); k++) {
for (index_t l = 0; l < t2c2.Size(3); l++) {
ASSERT_EQ(t2c2(i, j, k, l), t2(k, l));
}
}
}
}
// clone t3 across 0 dim
auto t3c1 = t3.Clone<4>({5, matxKeepDim, matxKeepDim, matxKeepDim});
ASSERT_EQ(t3c1.Size(0), 5);
for (index_t i = 0; i < t3c1.Size(0); i++) {
for (index_t j = 0; j < t3c1.Size(1); j++) {
for (index_t k = 0; k < t3c1.Size(2); k++) {
for (index_t l = 0; l < t3c1.Size(3); l++) {
ASSERT_EQ(t3c1(i, j, k, l), t3(j, k, l));
}
}
}
}
MATX_EXIT_HANDLER();
}
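// Sanity-checks the random generator views: UNIFORM samples must stay in [0, 1] with a
// mean close to 0.5, NORMAL samples must have a mean close to 0.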
TEST(ViewTests, Random)
{
MATX_ENTER_HANDLER();
{
index_t count = 100;
randomGenerator_t<float> rfloat(count * count * count, 0);
auto t3fu = rfloat.GetTensorView<3>({count, count, count}, UNIFORM);
auto t3fn = rfloat.GetTensorView<3>({count, count, count}, NORMAL);
tensor_t<float, 3> t3f({count, count, count});
(t3f = -1000000).run();
(t3f = t3fu).run();
t3f.PrefetchHost(0);
hipDeviceSynchronize();
float total = 0;
for (index_t i = 0; i < count; i++) {
for (index_t j = 0; j < count; j++) {
for (index_t k = 0; k < count; k++) {
float val = t3f(i, j, k) - 0.5f; // mean centered at zero
ASSERT_NE(val, -1000000);
total += val;
ASSERT_LE(val, 0.5f);
ASSERT_LE(-0.5f, val);
}
}
}
ASSERT_LT(fabs(total / (count * count * count)), .05);
(t3f = -1000000).run();
(t3f = t3fn).run();
t3f.PrefetchHost(0);
hipDeviceSynchronize();
total = 0;
for (index_t i = 0; i < count; i++) {
for (index_t j = 0; j < count; j++) {
for (index_t k = 0; k < count; k++) {
float val = t3f(i, j, k);
ASSERT_NE(val, -1000000);
total += val;
}
}
}
ASSERT_LT(fabs(total / (count * count * count)), .15);
}
MATX_EXIT_HANDLER();
}
TYPED_TEST(ViewTestsComplex, RealComplexView)
{
MATX_ENTER_HANDLER();
tensor_t<TypeParam, 1> tc({10});
auto tr = tc.RealView();
auto ti = tc.ImagView();
for (int i = 0; i < 10; i++) {
TypeParam val(
static_cast<promote_half_t<typename TypeParam::value_type>>(i),
static_cast<promote_half_t<typename TypeParam::value_type>>(i + 10));
tc(i) = val;
}
for (int i = 0; i < 10; i++) {
ASSERT_EQ((float)tc(i).real(), (float)tr(i));
ASSERT_EQ((float)tc(i).imag(), (float)ti(i));
}
MATX_EXIT_HANDLER();
}
| 52751b02369970687a967f2f53ee34148bbad30c.cu | ////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include "assert.h"
#include "matx.h"
#include "test_types.h"
#include "utilities.h"
#include "gtest/gtest.h"
using namespace matx;
template <typename TensorType> struct ViewTestsData {
tensor_t<TensorType, 0> t0{};
tensor_t<TensorType, 1> t1{{10}};
tensor_t<TensorType, 2> t2{{20, 10}};
tensor_t<TensorType, 3> t3{{30, 20, 10}};
tensor_t<TensorType, 4> t4{{40, 30, 20, 10}};
tensor_t<TensorType, 2> t2s = t2.Permute({1, 0});
tensor_t<TensorType, 3> t3s = t3.Permute({2, 1, 0});
tensor_t<TensorType, 4> t4s = t4.Permute({3, 2, 1, 0});
};
template <typename TensorType>
class ViewTestsComplex : public ::testing::Test,
public ViewTestsData<TensorType> {
};
template <typename TensorType>
class ViewTestsFloat : public ::testing::Test,
public ViewTestsData<TensorType> {
};
template <typename TensorType>
class ViewTestsFloatNonComplex
: public ::testing::Test,
public ViewTestsData<TensorType> {
};
template <typename TensorType>
class ViewTestsNumeric : public ::testing::Test,
public ViewTestsData<TensorType> {
};
template <typename TensorType>
class ViewTestsNumericNonComplex
: public ::testing::Test,
public ViewTestsData<TensorType> {
};
template <typename TensorType>
class ViewTestsIntegral : public ::testing::Test,
public ViewTestsData<TensorType> {
};
template <typename TensorType>
class ViewTestsBoolean : public ::testing::Test,
public ViewTestsData<TensorType> {
};
template <typename TensorType>
class ViewTestsAll : public ::testing::Test,
public ViewTestsData<TensorType> {
};
TYPED_TEST_SUITE(ViewTestsAll, MatXAllTypes);
TYPED_TEST_SUITE(ViewTestsComplex, MatXComplexTypes);
TYPED_TEST_SUITE(ViewTestsFloat, MatXFloatTypes);
TYPED_TEST_SUITE(ViewTestsFloatNonComplex, MatXFloatNonComplexTypes);
TYPED_TEST_SUITE(ViewTestsNumeric, MatXNumericTypes);
TYPED_TEST_SUITE(ViewTestsIntegral, MatXAllIntegralTypes);
TYPED_TEST_SUITE(ViewTestsNumericNonComplex, MatXNumericNonComplexTypes);
TYPED_TEST_SUITE(ViewTestsBoolean, MatXBoolTypes);
TYPED_TEST(ViewTestsNumericNonComplex, OverlapView)
{
MATX_ENTER_HANDLER();
tensor_t<TypeParam, 1> a{{10}};
a.SetVals({0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
auto ao = a.OverlapView({4}, {2});
tensor_t<TypeParam, 2> b{{4, 4}};
b.SetVals({{0, 1, 2, 3}, {2, 3, 4, 5}, {4, 5, 6, 7}, {6, 7, 8, 9}});
for (index_t i = 0; i < b.Size(0); i++) {
for (index_t j = 0; j < b.Size(1); j++) {
ASSERT_EQ(ao(i, j), b(i, j));
}
}
auto ao2 = a.OverlapView({4}, {1});
tensor_t<TypeParam, 2> b2{{7, 4}};
b2.SetVals({{0, 1, 2, 3},
{1, 2, 3, 4},
{2, 3, 4, 5},
{3, 4, 5, 6},
{4, 5, 6, 7},
{5, 6, 7, 8},
{6, 7, 8, 9}});
for (index_t i = 0; i < b2.Size(0); i++) {
for (index_t j = 0; j < b2.Size(1); j++) {
ASSERT_EQ(ao2(i, j), b2(i, j));
}
}
auto ao3 = a.OverlapView({4}, {3});
tensor_t<TypeParam, 2> b3{{3, 4}};
b3.SetVals({{0, 1, 2, 3}, {3, 4, 5, 6}, {6, 7, 8, 9}});
for (index_t i = 0; i < b3.Size(0); i++) {
for (index_t j = 0; j < b3.Size(1); j++) {
ASSERT_EQ(ao3(i, j), b3(i, j));
}
}
auto ao4 = a.OverlapView({3}, {2});
tensor_t<TypeParam, 2> b4{{4, 3}};
b4.SetVals({{0, 1, 2}, {2, 3, 4}, {4, 5, 6}, {6, 7, 8}});
for (index_t i = 0; i < b4.Size(0); i++) {
for (index_t j = 0; j < b4.Size(1); j++) {
ASSERT_EQ(ao4(i, j), b4(i, j));
}
}
MATX_EXIT_HANDLER();
}
TYPED_TEST(ViewTestsAll, Stride)
{
MATX_ENTER_HANDLER();
ASSERT_EQ(this->t1.Stride(0), 1);
ASSERT_EQ(this->t2.Stride(1), 1);
ASSERT_EQ(this->t3.Stride(2), 1);
ASSERT_EQ(this->t4.Stride(3), 1);
ASSERT_EQ(this->t2.Stride(0), this->t2.Size(1));
ASSERT_EQ(this->t3.Stride(1), this->t3.Size(2));
ASSERT_EQ(this->t4.Stride(2), this->t4.Size(3));
ASSERT_EQ(this->t3.Stride(0), this->t3.Size(2) * this->t3.Size(1));
ASSERT_EQ(this->t4.Stride(1), this->t4.Size(3) * this->t4.Size(2));
ASSERT_EQ(this->t4.Stride(0),
this->t4.Size(3) * this->t4.Size(2) * this->t4.Size(1));
MATX_EXIT_HANDLER();
}
TYPED_TEST(ViewTestsIntegral, SliceStride)
{
MATX_ENTER_HANDLER();
this->t1.SetVals({10, 20, 30, 40, 50, 60, 70, 80, 90, 100});
auto t1t = this->t1.Slice({0}, {matxEnd}, {2});
for (index_t i = 0; i < this->t1.Size(0); i += 2) {
ASSERT_EQ(this->t1(i), t1t(i / 2));
}
auto t1t2 = this->t1.Slice({2}, {matxEnd}, {2});
for (index_t i = 0; i < t1t2.Size(0); i++) {
ASSERT_EQ(30 + 20 * i, t1t2(i));
}
MATX_EXIT_HANDLER();
}
TYPED_TEST(ViewTestsIntegral, Slice)
{
MATX_ENTER_HANDLER();
auto t2t = this->t2.Slice({1, 2}, {3, 5});
auto t3t = this->t3.Slice({1, 2, 3}, {3, 5, 7});
auto t4t = this->t4.Slice({1, 2, 3, 4}, {3, 5, 7, 9});
// Negative slice test
try {
auto t2e = this->t2.Slice({1, 2}, {1, 2});
ASSERT_EQ(true, false);
}
catch (...) {
ASSERT_EQ(true, true);
}
ASSERT_EQ(t2t.Size(0), 2);
ASSERT_EQ(t2t.Size(1), 3);
ASSERT_EQ(t3t.Size(0), 2);
ASSERT_EQ(t3t.Size(1), 3);
ASSERT_EQ(t3t.Size(2), 4);
ASSERT_EQ(t4t.Size(0), 2);
ASSERT_EQ(t4t.Size(1), 3);
ASSERT_EQ(t4t.Size(2), 4);
ASSERT_EQ(t4t.Size(3), 5);
for (index_t i = 0; i < t2t.Size(0); i++) {
for (index_t j = 0; j < t2t.Size(1); j++) {
ASSERT_EQ(t2t(i, j), this->t2(i + 1, j + 2));
}
}
for (index_t i = 0; i < t3t.Size(0); i++) {
for (index_t j = 0; j < t3t.Size(1); j++) {
for (index_t k = 0; k < t3t.Size(2); k++) {
ASSERT_EQ(t3t(i, j, k), this->t3(i + 1, j + 2, k + 3));
}
}
}
for (index_t i = 0; i < t4t.Size(0); i++) {
for (index_t j = 0; j < t4t.Size(1); j++) {
for (index_t k = 0; k < t4t.Size(2); k++) {
for (index_t l = 0; l < t4t.Size(3); l++) {
ASSERT_EQ(t4t(i, j, k, l), this->t4(i + 1, j + 2, k + 3, l + 4));
}
}
}
}
MATX_EXIT_HANDLER();
}
TYPED_TEST(ViewTestsAll, SliceAndReduce)
{
tensor_t<float, 2> t2t{{20, 10}};
tensor_t<float, 3> t3t{{30, 20, 10}};
MATX_ENTER_HANDLER();
{
index_t j = 0;
auto t2sly = t2t.Slice<1>({0, j}, {matxEnd, matxDropDim});
for (index_t i = 0; i < t2sly.Size(0); i++) {
ASSERT_EQ(t2sly(i), t2t(i, j));
}
}
{
index_t i = 0;
auto t2slx = t2t.Slice<1>({i, 0}, {matxDropDim, matxEnd});
for (index_t j = 0; j < t2slx.Size(0); j++) {
ASSERT_EQ(t2slx(j), t2t(i, j));
}
}
{
index_t j = 0;
index_t k = 0;
auto t3slz = t3t.Slice<1>({0, j, k}, {matxEnd, matxDropDim, matxDropDim});
for (index_t i = 0; i < t3slz.Size(0); i++) {
ASSERT_EQ(t3slz(i), t3t(i, j, k));
}
}
{
index_t i = 0;
index_t k = 0;
auto t3sly = t3t.Slice<1>({i, 0, k}, {matxDropDim, matxEnd, matxDropDim});
for (index_t j = 0; j < t3sly.Size(0); j++) {
ASSERT_EQ(t3sly(j), t3t(i, j, k));
}
}
{
index_t i = 0;
index_t j = 0;
auto t3slx = t3t.Slice<1>({i, j, 0}, {matxDropDim, matxDropDim, matxEnd});
for (index_t k = 0; k < t3slx.Size(0); k++) {
ASSERT_EQ(t3slx(k), t3t(i, j, k));
}
}
{
index_t k = 0;
auto t3slzy = t3t.Slice<2>({0, 0, k}, {matxEnd, matxEnd, matxDropDim});
for (index_t i = 0; i < t3slzy.Size(0); i++) {
for (index_t j = 0; j < t3slzy.Size(1); j++) {
ASSERT_EQ(t3slzy(i, j), t3t(i, j, k));
}
}
}
{
index_t j = 0;
auto t3slzx = t3t.Slice<2>({0, j, 0}, {matxEnd, matxDropDim, matxEnd});
for (index_t i = 0; i < t3slzx.Size(0); i++) {
for (index_t k = 0; k < t3slzx.Size(1); k++) {
ASSERT_EQ(t3slzx(i, k), t3t(i, j, k));
}
}
}
{
index_t i = 0;
auto t3slyx = t3t.Slice<2>({i, 0, 0}, {matxDropDim, matxEnd, matxEnd});
for (index_t j = 0; j < t3slyx.Size(0); j++) {
for (index_t k = 0; k < t3slyx.Size(1); k++) {
ASSERT_EQ(t3slyx(j, k), t3t(i, j, k));
}
}
}
MATX_EXIT_HANDLER();
}
TEST(BasicTensorTest, Clone)
{
tensor_t<float, 0> t0{};
tensor_t<float, 1> t1{{10}};
tensor_t<float, 2> t2{{20, 10}};
tensor_t<float, 3> t3{{30, 20, 10}};
MATX_ENTER_HANDLER();
// clone t0 across 0/1/2/3 dim
auto t0c1 = t0.Clone<1>({5});
ASSERT_EQ(t0c1.Size(0), 5);
for (index_t i = 0; i < t0c1.Size(0); i++) {
ASSERT_EQ(t0c1(i), t0());
}
auto t0c2 = t0.Clone<2>({5, 6});
ASSERT_EQ(t0c2.Size(0), 5);
ASSERT_EQ(t0c2.Size(1), 6);
for (index_t i = 0; i < t0c2.Size(0); i++) {
for (index_t j = 0; j < t0c2.Size(1); j++) {
ASSERT_EQ(t0c2(i, j), t0());
}
}
auto t0c3 = t0.Clone<3>({5, 6, 7});
ASSERT_EQ(t0c3.Size(0), 5);
ASSERT_EQ(t0c3.Size(1), 6);
ASSERT_EQ(t0c3.Size(2), 7);
for (index_t i = 0; i < t0c3.Size(0); i++) {
for (index_t j = 0; j < t0c3.Size(1); j++) {
for (index_t k = 0; k < t0c3.Size(2); k++) {
ASSERT_EQ(t0c3(i, j, k), t0());
}
}
}
auto t0c4 = t0.Clone<4>({5, 6, 7, 8});
ASSERT_EQ(t0c4.Size(0), 5);
ASSERT_EQ(t0c4.Size(1), 6);
ASSERT_EQ(t0c4.Size(2), 7);
ASSERT_EQ(t0c4.Size(3), 8);
for (index_t i = 0; i < t0c4.Size(0); i++) {
for (index_t j = 0; j < t0c4.Size(1); j++) {
for (index_t k = 0; k < t0c4.Size(2); k++) {
for (index_t l = 0; l < t0c4.Size(3); l++) {
ASSERT_EQ(t0c4(i, j, k, l), t0());
}
}
}
}
auto t1c1 = t1.Clone<2>({5, matxKeepDim});
ASSERT_EQ(t1c1.Size(0), 5);
for (index_t i = 0; i < t1c1.Size(0); i++) {
for (index_t j = 0; j < t1c1.Size(1); j++) {
ASSERT_EQ(t1c1(i, j), t1(j));
}
}
auto t1c2 = t1.Clone<3>({5, 6, matxKeepDim});
ASSERT_EQ(t1c2.Size(0), 5);
ASSERT_EQ(t1c2.Size(1), 6);
ASSERT_EQ(t1c2.Size(2), t1.Size(0));
for (index_t i = 0; i < t1c2.Size(0); i++) {
for (index_t j = 0; j < t1c2.Size(1); j++) {
for (index_t k = 0; k < t1c2.Size(2); k++) {
ASSERT_EQ(t1c2(i, j, k), t1(k));
}
}
}
auto t1c3 = t1.Clone<4>({5, 6, 7, matxKeepDim});
ASSERT_EQ(t1c3.Size(0), 5);
ASSERT_EQ(t1c3.Size(1), 6);
ASSERT_EQ(t1c3.Size(2), 7);
ASSERT_EQ(t1c3.Size(3), t1.Size(0));
for (index_t i = 0; i < t1c3.Size(0); i++) {
for (index_t j = 0; j < t1c3.Size(1); j++) {
for (index_t k = 0; k < t1c3.Size(2); k++) {
for (index_t l = 0; l < t1c3.Size(3); l++) {
ASSERT_EQ(t1c3(i, j, k, l), t1(l));
}
}
}
}
// clone t2 across 0/1 dim
auto t2c1 = t2.Clone<3>({5, matxKeepDim, matxKeepDim});
ASSERT_EQ(t2c1.Size(0), 5);
for (index_t i = 0; i < t2c1.Size(0); i++) {
for (index_t j = 0; j < t2c1.Size(1); j++) {
for (index_t k = 0; k < t2c1.Size(2); k++) {
ASSERT_EQ(t2c1(i, j, k), t2(j, k));
}
}
}
auto t2c2 = t2.Clone<4>({5, 6, matxKeepDim, matxKeepDim});
ASSERT_EQ(t2c2.Size(0), 5);
ASSERT_EQ(t2c2.Size(1), 6);
for (index_t i = 0; i < t2c2.Size(0); i++) {
for (index_t j = 0; j < t2c2.Size(1); j++) {
for (index_t k = 0; k < t2c2.Size(2); k++) {
for (index_t l = 0; l < t2c2.Size(3); l++) {
ASSERT_EQ(t2c2(i, j, k, l), t2(k, l));
}
}
}
}
// clone t3 across 0 dim
auto t3c1 = t3.Clone<4>({5, matxKeepDim, matxKeepDim, matxKeepDim});
ASSERT_EQ(t3c1.Size(0), 5);
for (index_t i = 0; i < t3c1.Size(0); i++) {
for (index_t j = 0; j < t3c1.Size(1); j++) {
for (index_t k = 0; k < t3c1.Size(2); k++) {
for (index_t l = 0; l < t3c1.Size(3); l++) {
ASSERT_EQ(t3c1(i, j, k, l), t3(j, k, l));
}
}
}
}
MATX_EXIT_HANDLER();
}
TEST(ViewTests, Random)
{
MATX_ENTER_HANDLER();
{
index_t count = 100;
randomGenerator_t<float> rfloat(count * count * count, 0);
auto t3fu = rfloat.GetTensorView<3>({count, count, count}, UNIFORM);
auto t3fn = rfloat.GetTensorView<3>({count, count, count}, NORMAL);
tensor_t<float, 3> t3f({count, count, count});
(t3f = -1000000).run();
(t3f = t3fu).run();
t3f.PrefetchHost(0);
cudaDeviceSynchronize();
float total = 0;
for (index_t i = 0; i < count; i++) {
for (index_t j = 0; j < count; j++) {
for (index_t k = 0; k < count; k++) {
float val = t3f(i, j, k) - 0.5f; // mean centered at zero
ASSERT_NE(val, -1000000);
total += val;
ASSERT_LE(val, 0.5f);
ASSERT_LE(-0.5f, val);
}
}
}
ASSERT_LT(fabs(total / (count * count * count)), .05);
(t3f = -1000000).run();
(t3f = t3fn).run();
t3f.PrefetchHost(0);
cudaDeviceSynchronize();
total = 0;
for (index_t i = 0; i < count; i++) {
for (index_t j = 0; j < count; j++) {
for (index_t k = 0; k < count; k++) {
float val = t3f(i, j, k);
ASSERT_NE(val, -1000000);
total += val;
}
}
}
ASSERT_LT(fabs(total / (count * count * count)), .15);
}
MATX_EXIT_HANDLER();
}
TYPED_TEST(ViewTestsComplex, RealComplexView)
{
MATX_ENTER_HANDLER();
tensor_t<TypeParam, 1> tc({10});
auto tr = tc.RealView();
auto ti = tc.ImagView();
for (int i = 0; i < 10; i++) {
TypeParam val(
static_cast<promote_half_t<typename TypeParam::value_type>>(i),
static_cast<promote_half_t<typename TypeParam::value_type>>(i + 10));
tc(i) = val;
}
for (int i = 0; i < 10; i++) {
ASSERT_EQ((float)tc(i).real(), (float)tr(i));
ASSERT_EQ((float)tc(i).imag(), (float)ti(i));
}
MATX_EXIT_HANDLER();
}
|
27e99d3d95f421aba5e8008205b3f7037a0b066b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
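// Traverses the Bresenham line from p to q inside a bsize x bsize patch and returns
// sqrt(sum of squared differences) / sample count between the user patch (dest) and
// candidate column cx of the packed candidate texture bigtex.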
__host__ __device__ float get_diff(float* dest, float* bigtex, int bsize, int cx, node_t p, node_t q)
{
//int alln = bigwidth/(bsize*bsize);
float sum=0;
int count=0;
int x0=p.x, y0=p.y;
int x1=q.x, y1=q.y;
bool steep = ( abs(y1 - y0) > abs(x1 - x0) );
if (steep)
{
swap(x0, y0);
swap(x1, y1);
}
if (x0 > x1)
{
swap(x0, x1);
swap(y0, y1);
}
int deltax = x1 - x0;
int deltay = abs(y1 - y0);
int error = deltax / 2 ;
int ystep;
int y = y0;
if (y0 < y1)
ystep = 1 ;
else ystep = -1;
for (int x=x0; x<=x1; x++)
{
if (steep)
{
int ide = y + x*bsize;
float val = dest[ide]-bigtex[cx+(ide*bigw)];
sum+= val*val;
}
else
{
int ide = x + y*bsize;
float val = dest[ide]-bigtex[cx+(ide*bigw)];
sum+= val*val;
}
count++;
error = error - deltay;
if (error < 0)
{
y = y + ystep;
error = error + deltax;
}
}
return sqrt(sum)/count;
}
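// Sums the line differences from every candidate branch node in cand_br1 to the patch
// centre; with exactly two branch nodes it additionally samples the two half-lines
// through the centre that are perpendicular to the branch direction.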
__host__ __device__ float get_diff_profile(float* dest, float* bigtex, int bsize, int cx, node_t* cand_br1, int csize)
{
float total=0.;
int mw=bsize, mh=bsize;
node_t mid(mw/2,mh/2);
for (int k=0; k<csize; k++)
{
total += get_diff(dest,bigtex,bsize,cx,cand_br1[k],mid);
}
if (csize>2) return total;
if (csize==2)
{
node_t qnode1,qnode2;
qnode1 = cand_br1[0];
qnode2=cand_br1[1];
node_t vec(-(qnode1.y-qnode2.y)/2,(qnode1.x-qnode2.x)/2);
qnode1 = node_t(mid.x+vec.x,mid.y+vec.y);
qnode2 = node_t(mid.x-vec.x,mid.y-vec.y);
total += get_diff(dest,bigtex,bsize,cx,qnode1, mid);
total += get_diff(dest,bigtex,bsize,cx,mid, qnode2);
}
return total;
}
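// Masked SSD: compares only the non-zero pixels of the user patch against candidate
// column cx of bigtex and returns sqrt(sum)/count.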
__host__ __device__ float ssdf(float* dest, float* bigtex, int bsize, int cx)
{
float sum = 0.;
int count = 0;
//int ide = 0;
//int alln = bigwidth/(bsize*bsize);
for (int x=0; x<bsize*bsize; x++)
if ( dest[x]!=0.)
{
//count++;
float val = dest[x]-bigtex[cx+x*bigw];
sum+= val*val;
count++;
}
count++;
return sqrt(sum)/count;
}
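// Unpacks candidate column cx of the coalesced bigtex layout into a bsize x bsize Image.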
void getCand(Image& cand, float* bigtex, int cx)
{
for (int j=0; j<cand.height(); j++)
for (int i=0; i<cand.width(); i++){
//cout<<cx<<" "<<bigw<<" "<<bigtex[cx]<<endl;
cand(i,j)=bigtex[cx];
cx+=bigw;
}
//cand.savePGM("/tmp/cand_img.pgm"); cin.get();
}
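// Re-ranks at most KNUM candidates with the graph-cut seam cost against the output at
// (dx,dy), stores the winner in *prev and returns the corresponding patch.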
Image findCand(Image& dest, float* bigtex, int bigwidth, cost_t* candidates, int csize, cost_t* prev, int bsize, int dx, int dy)
{
vector<cost_t> choices;
//sort(candidates,candidates+csize);
int s = csize;
if (s>KNUM) s=KNUM;
for (int k=0; k<s; k++)
{
Image cand(bsize,bsize);
getCand(cand,bigtex,candidates[k].tnode.x);
candidates[k].cost = graphCut_cost(&dest,&cand,dx,dy);
choices.push_back(candidates[k]);
}
sort(choices.begin(),choices.end());
cost_t choice = choices[0];
Image res(bsize,bsize);
getCand(res,bigtex,choice.tnode.x);
*prev = choice;
return res;
}
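// Same re-ranking as findCand, but takes its candidates from the device-side vector
// (sorted by the caller) and picks the cheapest of the first KNUM entries.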
Image findCand_dev(Image& dest, float* bigtex, thrust::device_vector<cost_t> candidates_dev, int csize, cost_t* prev, int bsize, int dx, int dy)
{
int s = csize;
if (s>KNUM) s=KNUM;
cost_t choice; float mini = 10*INF;
for (int k=0; k<s; k++)
{
Image cand(bsize,bsize);
cost_t cur = candidates_dev[k];
getCand(cand,bigtex,cur.tnode.x);
float cost = graphCut_cost(&dest,&cand,dx,dy);
if (cost<mini){
mini = cost;
choice = cur;
}
}
Image res(bsize,bsize);
getCand(res,bigtex,choice.tnode.x);
*prev = choice;
return res;
}
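// Cost of a non-feature candidate: masked SSD against the user patch plus, when
// use_noisestat is set, a small noise-variance comparison term.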
__host__ __device__ float getCost_noFeature(float* dest, float* bigtex, float* dem_vars, float* usr_var, cost_t dem, int bsize)
{
int cx = dem.tnode.x;
//int cy = dem.tnode.y;
float tmp = 10*ssdf(dest,bigtex,bsize,cx);
if (use_noisestat) tmp+=0.0001*compare_variances(usr_var,dem_vars,dem.vpos,NLEVEL);
return tmp;
}
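// One thread per candidate: writes its matching cost, or INF for entries flagged as
// skip (and for the first few indices).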
__global__ void ComputeCosts_noFeature_Kernel( float* dest, float* bigtex, cost_t* candidates, int csize, float* dem_vars, float* usr_var, cost_t* prev, int bsize){
//__shared__ float* dest = destg;
int k = blockDim.x * blockIdx.x + threadIdx.x;
if (k<csize){
if ( (!candidates[k].skip) && k>5)
{
candidates[k].cost = getCost_noFeature(dest, bigtex, dem_vars, usr_var, candidates[k], bsize);
}
else
{
candidates[k].cost = INF;
}
}
}
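// One thread per (source node, rotation-or-mirror) combination: resamples the transformed
// bsize x bsize patch from the source exemplar into the coalesced bigtex layout and fills
// in the matching candidate descriptor.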
__global__ void buildCandidates_noFeature_Kernel(cost_t* candidates, float* bigtex, float* src_ptr, node_t* dnodes,int nsize, int src_w, int src_h, int bsize){
int k = blockDim.x * blockIdx.x + threadIdx.x;
if (k<nsize*(360/DROT+ DMIR))
{
int kn = k%(nsize);
int rt = k/(nsize);
bigw = nsize*(360/DROT+ DMIR);
node_t pnode = dnodes[kn];
int rx = pnode.x;
int ry = pnode.y;
int mid = bsize/2;
int rs = (360/DROT+ DMIR);
int rot = rt*DROT;
cost_t c(node_t(rx,ry),0,rot,0,0);
{
int kpos = (kn*(rs))+rt;
c.vpos = kpos*3;
int cx = kpos; //for coleasced memory access
int cy = 0;
c.tnode = node_t(cx,cy);
c.skip = false;
for (int j=0; j<bsize; j++)
for (int i=0; i<bsize; i++)
{
//cout<<cx+i<<"/"<<cy+i<<": "<<cand(i,j)<<endl;
int ri = 0;
int rj = 0;
float candv=0;
if (rt<360/DROT){
float ni=0;
float nj=0;
ni = (rx+mid) + ((i - mid)*cos_int(rot)) + ((j - mid) * sin_int(rot));
nj = (ry+mid) - ((i - mid)*sin_int(rot)) + ((j - mid) * cos_int(rot));
if (ni>=0 && nj>=0 && ni<src_w && nj<src_h)
candv = cubicInterpol(src_ptr,src_w,src_h,ni,nj);
}
else if (rt==360/DROT){ // first mirrored variant (flip in x)
ri = (rx+bsize-i-1);
rj = (ry+j);
if( ri>=0 && rj>=0 && ri<src_w && rj<src_h)
candv = src_ptr[ ri + rj*src_w ];
}
else {
ri = (rx+i);
rj = (ry+bsize-j-1);
if( ri>=0 && rj>=0 && ri<src_w && rj<src_h)
candv = src_ptr[ ri + rj*src_w ];
}
(bigtex)[cx] = candv;
cx+= (rs*nsize); //for coleasced memory access
if (candv<0.0001){
c.skip = false;
candidates[kpos] = (c);
//return;
}
}
candidates[kpos] = (c);
}
}
}
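// Non-feature synthesis pass: builds the full candidate set on the GPU once, then
// repeatedly picks the next target window in the output, scores every candidate on the
// GPU, refines the best few with a CPU graph-cut seam cost and pastes the winning patch.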
void match_noFeature_bef(Terrain& dest, Image& src, Image& target, node_list dem_nodes, vector<Image>& tar_pyr, vector<Image>& src_pyr, int bsize, int osize)
{
clock_t start_t, end_t, s_tmp, e_tmp;
start_t = clock();
match_time = 0;
paste_time = 0;
get_target = 0;
int nsize = dem_nodes.size();
{
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Get pyramid elapsed time: "<<elapsed<<" s.\n";
start_t = clock();
}
float* variances = get_noise_stats_Feature(src_pyr, dem_nodes, bsize);
{
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Get noise stats elapsed time: "<<elapsed<<" s.\n";
start_t = clock();
}
node_t* dnodes = new node_t [nsize];
int rs = (360/DROT);
int bigwidth = nsize*bsize;
int bigheight = (rs+DMIR)*bsize;
bigw = nsize*(rs+DMIR);
Image big(bigwidth,bigheight);
float* bigtex = big.getPixels();
float* src_ptr = src.getPixels();
cost_t* candidates = new cost_t[nsize*(rs+DMIR)];
float* dem_vars = new float [nsize*(rs+DMIR)*3];
{
int count=0;
int rv = 0;
for (node_list::const_iterator it = dem_nodes.begin(); it != dem_nodes.end(); it++ ){
dnodes[count] = *it;
for (int rot=0; rot<360; rot+=DROT)
{
for (int k=0; k<NLEVEL; k++)
dem_vars[rv++] = variances[count*NLEVEL+k];
}
for (int m=0; m<DMIR; m++){
int mir = m+1;
for (int k=0; k<NLEVEL; k++)
dem_vars[rv++] = variances[count*NLEVEL+k];
}
count++;
}
}
{
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Elapsed time: "<<elapsed<<" s.\n";
start_t = clock();
}
float cand_time = 0;
cout<<"Prepare candidates! "<< nsize<<"\n";
float* src_dev; hipMalloc((void**) &src_dev, sizeof(float)*src.getWidth()*src.getHeight());
float* bigtex_dev; hipMalloc((void**) &bigtex_dev, sizeof(float)*bigwidth*bigheight);
float* dem_vars_dev; hipMalloc((void**) &dem_vars_dev, sizeof(float)*nsize*(rs+DMIR)*3);
//cost_t* candidates_dev; hipMalloc((void**) &candidates_dev, sizeof(cost_t)*nsize*rs);
node_t* dnodes_dev; hipMalloc((void**) &dnodes_dev, sizeof(node_t)*nsize);
hipMemcpy(src_dev, src.getPixels(), sizeof(float)*src.getWidth()*src.getHeight(), hipMemcpyHostToDevice);
//hipMemcpy(bigtex_dev, big.getPixels(), sizeof(float)*big.getWidth()*big.getHeight(), hipMemcpyHostToDevice);
hipMemcpy(dem_vars_dev, dem_vars, sizeof(float)*nsize*(rs+DMIR)*3, hipMemcpyHostToDevice);
thrust::device_vector<cost_t> candidates_dev(nsize*(rs+DMIR));//hipMemcpy(candidates_dev, candidates, sizeof(cost_t)*nsize*rs, hipMemcpyHostToDevice);
hipMemcpy(dnodes_dev, dnodes, sizeof(node_t)*nsize, hipMemcpyHostToDevice);
//cout<<"Start matching non-feature\n"<<endl;
{
dim3 dimBlock(32,rs+DMIR);
dim3 dimGrid( (nsize / dimBlock.x)+1,1); //, A.height / dimBlock.y
int threadsPerBlock = 256;
int blocksPerGrid = ((nsize*(rs+DMIR)) / threadsPerBlock)+1;
cout<<blocksPerGrid<<" "<<threadsPerBlock<<endl;
//buildCandidates_noFeature_Kernel<<<dimGrid, dimBlock>>> (thrust::raw_pointer_cast(&candidates_dev[0]), bigtex_dev, src_dev, dnodes_dev,nsize, src.getWidth(),src.getHeight(),bsize);
hipLaunchKernelGGL(( buildCandidates_noFeature_Kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&candidates_dev[0]), bigtex_dev, src_dev, dnodes_dev,nsize, src.getWidth(),src.getHeight(),bsize);
//hipMemcpy(candidates, candidates_dev, sizeof(cost_t)*nsize*rs, hipMemcpyDeviceToHost);
//hipMemcpy(big.getPixels(), bigtex_dev, sizeof(float)*bigwidth*bigheight, hipMemcpyDeviceToHost);
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Elapsed time: "<<elapsed<<" s.\n";
cand_time += elapsed;
start_t = clock();
}
//big.savePGM("/tmp/bigtmp_noFeature.pgm");
cost_t* prev = new cost_t(node_t(-1,-1),0,0,0,0);
int dx = -5*bsize, dy=-5*bsize;
int cnum = 0;
for (int k=0; k<nsize*(rs+DMIR); k++)
if (!candidates[k].skip)
cnum++;
cout<<"Start matching non-feature patches! from "<<cnum<<"\n";
//return;
float* usr_var_dev; hipMalloc((void**) &usr_var_dev,sizeof(float)*NLEVEL);
cost_t* prev_dev; hipMalloc((void **) &prev_dev,sizeof(cost_t));
float* ucand_dev; hipMalloc((void **) &ucand_dev, sizeof(float)*bsize*bsize);
cnum = 0;
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
dim3 dimGrid( (dest.width() / dimBlock.x)+1,(dest.height() / dimBlock.y)+1); //, A.height / dimBlock.y
//cout<<dimGrid.x<<"/"<<dimGrid.y<<": "<<dimBlock.x<<"/"<<dimBlock.y<<endl;
int ide = 220 + 74*dest.width();
//cout<<ide<<" "<<cand_var[ide]<<endl;
list<cost_t> omega;
float* conf = new float [dest.width()*dest.height()];
for (int x=0; x<dest.width(); x++) for (int y=0; y<dest.height(); y++)
if (dest(x,y)>BG)
conf[x+y*dest.width()] = 1;
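// Main synthesis loop: take the highest-priority boundary window reported by
// getNextTarget2, match and paste a candidate patch there, and repeat until no
// unfilled boundary targets remain.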
while (true)
{
int threadsPerBlock = 256;
int blocksPerGrid = ((nsize*(rs+DMIR)) / threadsPerBlock)+1;
s_tmp = clock();
//getNextTarget<<<dimGrid,dimBlock>>> (dest_dev,cand_var_dev,dest.width(), dest.height(), bsize, dx+bsize/2, dy+bsize/2);
//hipMemcpy(cand_var, cand_var_dev, sizeof(float)*dest.getWidth()*dest.getHeight(), hipMemcpyDeviceToHost);
getNextTarget2 (dest.getPixels(), conf, omega,dest.width(), dest.height(), bsize, dx+bsize/2, dy+bsize/2);
if (omega.size()==0) break;
//getNextTarget(dest.getPixels(), cand_var, dest.width(), dest.height(), bsize, dx+bsize/2, dy+bsize/2);
//cout<<ide<<" "<<cand_var[ide]<<endl;
//cin.get();
int maxi = -1;
int xmaxi = -1;
int ymaxi = -1;
float maxv = -INF, tmp;
for (list<cost_t>::iterator it=omega.begin(); it!=omega.end(); it++){
tmp = (*it).cost;
if (tmp>maxv){
node_t p = (*it).org;
maxv = tmp;
xmaxi = p.x;
ymaxi = p.y;
}
}
//cout<<xmaxi<<"/"<<ymaxi<<" --> "<<maxv<<endl;
e_tmp = clock();
get_target+=mstimer(s_tmp,e_tmp);
//if (xmaxi<0) break;
//dy=(maxi/dest.width())-bsize/2;
//dx=(maxi%dest.width())-bsize/2;
dx=(xmaxi)-bsize/2;
dy=(ymaxi)-bsize/2;
s_tmp = clock();
vector<float> usr_var = noise_variances(tar_pyr,dx,dy,bsize);
Image ucand = dest.get_crop(dx,dy,dx+bsize-1,dy+bsize-1);
hipMemcpy(usr_var_dev, &usr_var[0], sizeof(float)*NLEVEL, hipMemcpyHostToDevice);
hipMemcpy(ucand_dev, ucand.getPixels(), bsize*bsize*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( ComputeCosts_noFeature_Kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, ucand_dev, bigtex_dev,thrust::raw_pointer_cast(&candidates_dev[0]) , nsize*(rs+DMIR), dem_vars_dev, usr_var_dev, prev_dev, bsize);
thrust::sort(candidates_dev.begin(),candidates_dev.end(),comp);
//hipMemcpy(candidates, candidates_dev, sizeof(cost_t)*nsize*rs, hipMemcpyDeviceToHost);
//Image patch = findCand(dest,bigtex,bigwidth,candidates,nsize*rs, prev,bsize, dx,dy);
Image patch = findCand_dev(dest,bigtex,candidates_dev,nsize*(rs+DMIR), prev,bsize,dx,dy);
hipMemcpy(prev_dev, prev, sizeof(cost_t), hipMemcpyHostToDevice);
e_tmp = clock();
match_time+=mstimer(s_tmp,e_tmp);
s_tmp = clock();
paste_patch(dest,patch, bsize/10, dx, dy);
//dest.savePGM("/tmp/res_tmp_gpu.pgm");
//hipMemcpy(dest_dev, dest.getPixels(), sizeof(float)*dest.width()*dest.height(), hipMemcpyHostToDevice);
e_tmp = clock();
paste_time+=mstimer(s_tmp,e_tmp);
for (int i=0; i<bsize ;i++) for (int j=0; j<bsize ;j++)
if (xmaxi+i>=0 && ymaxi+j>=0 && xmaxi+i<dest.width() && ymaxi+j<dest.height() && dest(xmaxi+i,ymaxi+j)<=BG)
conf[(xmaxi+i)+(ymaxi+j)*dest.width()] = conf[xmaxi+ymaxi*dest.width()];
cnum++;
}
cout<<"Number of targets: "<<cnum<<endl;
delete [] conf;
{
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Elapsed time: "<<elapsed<<" s.\n";
start_t = clock();
}
int threadsPerBlock = 256;
int blocksPerGrid = ((nsize*(rs+DMIR)) / threadsPerBlock)+1;
bool havdata = false;
for (int id = 0; id<dest.height()*dest.width(); id++)
if (dest.getPixels()[id]>BG){
havdata = true;
break;
}
if (cnum==0){
cout<<"Number of targets: "<<cnum<<endl;
while (true){
bool finish = true;
for (int y = 0; y<dest.height(); y++)
for (int x = 0; x<dest.width(); x++)
if (dest(x,y)<=BG && ( (!havdata) || onBoundary(dest.getPixels(),dest.width(),dest.height(),x,y)))
{
//dy=dx;
finish = false;
dx = x-bsize/2;
dy = y-bsize/2;
s_tmp = clock();
vector<float> usr_var = noise_variances(tar_pyr,dx,dy,bsize);
Image ucand = dest.get_crop(dx,dy,dx+bsize-1,dy+bsize-1);
hipMemcpy(usr_var_dev, &usr_var[0], sizeof(float)*NLEVEL, hipMemcpyHostToDevice);
hipMemcpy(prev_dev, prev, sizeof(cost_t), hipMemcpyHostToDevice);
hipMemcpy(ucand_dev, ucand.getPixels(), bsize*bsize*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( ComputeCosts_noFeature_Kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, ucand_dev, bigtex_dev,thrust::raw_pointer_cast(&candidates_dev[0]) , nsize*(rs+DMIR), dem_vars_dev, usr_var_dev, prev_dev, bsize);
thrust::sort(candidates_dev.begin(),candidates_dev.end(),comp);
//hipMemcpy(candidates, candidates_dev, sizeof(cost_t)*nsize*rs, hipMemcpyDeviceToHost);
//Image patch = findCand(dest,bigtex,bigwidth,candidates,nsize*rs, prev,bsize, dx,dy);
Image patch = findCand_dev(dest,bigtex,candidates_dev,nsize*(rs+DMIR), prev,bsize,dx,dy);
hipMemcpy(prev_dev, prev, sizeof(cost_t), hipMemcpyHostToDevice);
e_tmp = clock();
match_time+=mstimer(s_tmp,e_tmp);
s_tmp = clock();
paste_patch(dest,patch, bsize/10, dx, dy);
//dest.savePGM("/tmp/res_tmp_gpu.pgm");
//hipMemcpy(dest_dev, dest.getPixels(), sizeof(float)*dest.width()*dest.height(), hipMemcpyHostToDevice);
e_tmp = clock();
paste_time+=mstimer(s_tmp,e_tmp);
cnum++;
//dest.savePGM("/tmp/tmp.pgm");
}
if (finish) break;
}
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Elapsed time: "<<elapsed<<" s.\n";
}
hipFree(usr_var_dev);
hipFree(prev_dev);
delete prev;
hipFree(ucand_dev);
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Elapsed time: "<<elapsed<<" s.\n";
delete [] candidates;
delete [] dnodes;
delete [] dem_vars;
delete [] variances;
//hipFree(candidates_dev);
hipFree(dnodes_dev);
hipFree(dem_vars_dev);
hipFree(src_dev);
hipFree(bigtex_dev);
cerr<<"\n\n*********** Non Feature matching GPU*******************\n";
print_times();
cerr<<" Candidates set: "<<cand_time<<"s\n";
cerr<<" Number of targets: "<<cnum<<"\n";
cerr<<" Number of cands: "<<nsize*(rs+DMIR)<<"\n";
cerr<<"*********** End *******************\n\n";
}
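// Matching cost for a feature (ridge/valley) patch: weighted sum of the noise-variance,
// branch-angle, profile and SSD terms that are enabled by the use_* flags.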
__host__ __device__ float getCost_Feature(float* dest, float* target, float* bigtex, float* dem_vars, node_t* dem_leafs, float* usr_var, node_t* uleafs, cost_t dem, int bsize)
{
float tmp = 0.;
int cx = dem.tnode.x;
int cy = dem.tnode.y;
//Image cand = bigtex.get_crop(dem.tnode.x,dem.tnode.y,dem.tnode.x+(bsize-1),dem.tnode.y+(bsize-1));
//if (use_bend && usr.candleafs.size()>2) tmp+= 100*tps_img(cand,uleafs,dem_leafs,dem.lpos,bsize,dem.lsize);
//if (use_cut) tmp+=graphCut_cost(&dest,&cand,dx,dy);
//if (use_bend && dem.lsize>=3) tmp+= 1000*get_tps(uleafs,dem_leafs,dem.lpos,bsize,dem.lsize) ;
if (use_noisestat) tmp+=0.001*compare_variances(usr_var,dem_vars,dem.vpos,NLEVEL);
if (use_angle) tmp+= 2*getDiffAng(uleafs,dem_leafs,dem.lpos,bsize,dem.lsize);
//cout<<"hi\n";
if (use_profile) tmp+= 5*get_diff_profile(target,bigtex,bsize,cx,uleafs,dem.lsize);
//cout<<"hey\n";
if (use_ssd) tmp+= ssdf(dest,bigtex,bsize,cx);
/*float t = ssdf(dest,bigtex,dsize,bsize,dx,dy,cx,cy);
if (t>0){
cout<<0.01*compare_variances(usr_var,dem_vars,dem.vpos,NLEVEL)<<endl;
cout<<getDiffAng(uleafs,dem_leafs,dem.lpos,bsize,dem.lsize)<<endl;
cout<<get_diff_profile(target,bigtex,bsize,dx,dy,cx,cy,uleafs,dem.lsize)<<endl;
cout<<t/bsize<<endl;
}*/
return tmp;
}
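// One thread per candidate: fills candidates[k].cost with getCost_Feature when the candidate
// has the same branch count as the target, INF otherwise.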
__global__ void ComputeCosts_Feature_Kernel( float* dest, float* target, float* bigtex, cost_t* candidates, int csize, float* dem_vars, node_t* dem_leafs, float* usr_var, node_t* uleafs, cost_t* prev, int bsize, int lsize){
int k = blockDim.x * blockIdx.x + threadIdx.x;
if (k<csize){
if ( (!candidates[k].skip) && lsize==candidates[k].lsize && k>5)
{
candidates[k].cost = getCost_Feature(dest, target, bigtex, dem_vars, dem_leafs, usr_var, uleafs, candidates[k], bsize);
}
else
{
candidates[k].cost = INF;
}
}
}
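// One thread per (feature node, rotation/mirror): resamples the source patch into the coalesced
// bigtex buffer and records its branch/variance metadata in candidates[].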
__global__ void buildCandidates_Feature_Kernel(cost_t* candidates, float* bigtex, float* src_ptr, node_t* dnodes, node_t* dem_lsizes, int nsize, int src_w, int src_h, int bsize){
int k = blockDim.x * blockIdx.x + threadIdx.x;
if (k<nsize*(360/DROT+ DMIR))
{
int rs = (360/DROT+DMIR);
int kn = k/(rs);
int rt = k%(rs);
bigw = nsize*(360/DROT+ DMIR);
node_t cnode = dnodes[kn];
int mid = bsize/2;
int rx = cnode.x-bsize/2;
int ry = cnode.y-bsize/2;
int rot = rt*DROT;
cost_t c(node_t(rx,ry),0,rot,0,0);
{
//cout<<"hi1\n";
c.cnode = dnodes[kn];
//cout<<"hi2\n";
int kpos = (kn*rs)+rt;
c.lpos = dem_lsizes[kpos].x;
c.lsize = dem_lsizes[kpos].y;
c.vpos = kpos*3;
int cx = kpos; //coalesced memory access
c.tnode = node_t(cx,0);
c.skip =false;
for (int j=0; j<bsize; j++)
for (int i=0; i<bsize; i++)
{
//cout<<cx+i<<"/"<<cy+i<<": "<<cand(i,j)<<endl;
int ri = 0;
int rj = 0;
float candv=0;
if (rt<360/DROT){
float ni=0;
float nj=0;
ni = (rx+mid) + ((i - mid)*cos_int(rot)) + ((j - mid) * sin_int(rot));
nj = (ry+mid) - ((i - mid)*sin_int(rot)) + ((j - mid) * cos_int(rot));
if (ni>=0 && nj>=0 && ni<src_w && nj<src_h)
candv = cubicInterpol(src_ptr,src_w,src_h,ni,nj);
}
else if (rot==360/DROT){
ri = (rx+bsize-i-1);
rj = (ry+j);
if( ri>=0 && rj>=0 && ri<src_w && rj<src_h)
candv = src_ptr[ ri + rj*src_w ];
}
else {
ri = (rx+i);
rj = (ry+bsize-j-1);
if( ri>=0 && rj>=0 && ri<src_w && rj<src_h)
candv = src_ptr[ ri + rj*src_w ];
}
(bigtex)[cx] = candv;
cx += bigw; //coalesced memory access
if (candv<0.0001) c.skip = true;
}
candidates[kpos] = (c);
}
}
}
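// GPU driver for the feature pass: builds every rotated/mirrored candidate around the extracted
// feature nodes, then matches and merges one patch per user feature node.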
void match_Feature_bef(Terrain& dest, Tree& usr_features, Tree& dem_features, vector<Image>& tar_pyr, vector<Image>& src_pyr, int bsize)
{
clock_t start_t, end_t, s_tmp, e_tmp;
start_t = clock();
match_time = 0;
paste_time = 0;
get_target = 0;
Image target = usr_features.msource;
Image src = dem_features.msource;
node_list dem_nodes = dem_features.processNodes;
int nsize = dem_nodes.size();
int rs = 360/DROT;
node_t* dnodes = new node_t [nsize];
{
int count=0;
for (node_list::const_iterator it = dem_nodes.begin(); it != dem_nodes.end(); it++ ){
dnodes[count] = *it;
count++;
}
}
int bigwidth = nsize*(rs+DMIR)*bsize*bsize;
int bigheight = 1;
bigw = nsize*(rs+DMIR);
Image big(bigwidth,bigheight);
float* bigtex = big.getPixels();
float* src_ptr = src.getPixels();
{
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Get pyramid elapsed time: "<<elapsed<<" s.\n";
start_t = clock();
}
float* variances = get_noise_stats_Feature(src_pyr, dem_nodes, bsize);
{
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Get noise stats elapsed time: "<<elapsed<<" s.\n";
start_t = clock();
}
//float* variances = get_noise_stats_Feature(src_pyr, dem_nodes, bsize);
//cost_t* candidates = new cost_t[nsize*rs];
float* dem_vars = new float [nsize*(rs+DMIR)*3];
node_t* dem_leafs = new node_t [nsize*(rs+DMIR)*10];
node_t* dem_lsizes = new node_t [nsize*(rs+DMIR)];
{
int rt = 0;
int rl = 0;
int rv = 0;
int cl=0;
for (int kn=0; kn<nsize; kn++)
{
node_t cnode = dnodes[kn];
for (int rot=0; rot<360; rot+=DROT)
{
vector<node_t> candleafs = getChildren(cnode,dem_features,bsize,rot);
dem_lsizes[rl].x = cl;
dem_lsizes[rl].y = candleafs.size();
cl += dem_lsizes[rl].y;
rl++;
for (unsigned int k=0; k<candleafs.size(); k++)
dem_leafs[rt++]=candleafs[k];
for (int k=0; k<NLEVEL; k++)
dem_vars[rv++] = variances[kn*NLEVEL+k];
}
for (int m=0; m<DMIR; m++){
int mir = m+1;
vector<node_t> candleafs = getChildren(cnode,dem_features,bsize,0,mir);
dem_lsizes[rl].x = cl;
dem_lsizes[rl].y = candleafs.size();
cl += dem_lsizes[rl].y;
rl++;
for (unsigned int k=0; k<candleafs.size(); k++)
dem_leafs[rt++]=candleafs[k];
for (int k=0; k<NLEVEL; k++)
dem_vars[rv++] = variances[kn*NLEVEL+k];
}
}
}
{
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Elapsed time: "<<elapsed<<" s.\n";
start_t = clock();
}
float cand_time = 0;
cout<<"Prepare candidates! "<< nsize<<"\n";
start_t = clock();
float* src_dev; hipMalloc((void**) &src_dev, sizeof(float)*src.getWidth()*src.getHeight());
//float* dest_dev; hipMalloc((void**) &dest_dev, sizeof(float)*dest.getWidth()*dest.getHeight());
//float* tar_dev; hipMalloc((void**) &tar_dev, sizeof(float)*target.getWidth()*target.getHeight());
float* bigtex_dev; hipMalloc((void**) &bigtex_dev, sizeof(float)*bigwidth*bigheight);
float* dem_vars_dev; hipMalloc((void**) &dem_vars_dev, sizeof(float)*nsize*(rs+DMIR)*3);
thrust::device_vector<cost_t> candidates_dev(nsize*(rs+DMIR));
//cost_t* candidates_dev; hipMalloc((void**) &candidates_dev, sizeof(cost_t)*nsize*rs);
//cost_t* candidates_dev;
//hipMalloc((void**) &candidates_dev, sizeof)
node_t* dnodes_dev; hipMalloc((void**) &dnodes_dev, sizeof(node_t)*nsize);
node_t* dem_lsizes_dev; hipMalloc((void**) &dem_lsizes_dev, sizeof(node_t)*nsize*(rs+DMIR));
node_t* dem_leafs_dev; hipMalloc((void**) &dem_leafs_dev, sizeof(node_t)*nsize*(rs+DMIR)*5);
hipMemcpy(src_dev, src.getPixels(), sizeof(float)*src.getWidth()*src.getHeight(), hipMemcpyHostToDevice);
// hipMemcpy(dest_dev, dest.getPixels(), sizeof(float)*dest.getWidth()*dest.getHeight(), hipMemcpyHostToDevice);
// hipMemcpy(tar_dev, target.getPixels(), sizeof(float)*target.getWidth()*target.getHeight(), hipMemcpyHostToDevice);
hipMemcpy(bigtex_dev, big.getPixels(), sizeof(float)*big.getWidth()*big.getHeight(), hipMemcpyHostToDevice);
hipMemcpy(dem_vars_dev, dem_vars, sizeof(float)*nsize*(rs+DMIR)*3, hipMemcpyHostToDevice);
//hipMemcpy(candidates_dev, candidates, sizeof(cost_t)*nsize*rs, hipMemcpyHostToDevice);
hipMemcpy(dnodes_dev, dnodes, sizeof(node_t)*nsize, hipMemcpyHostToDevice);
hipMemcpy(dem_lsizes_dev, dem_lsizes, sizeof(node_t)*nsize*(rs+DMIR), hipMemcpyHostToDevice);
hipMemcpy(dem_leafs_dev, dem_leafs, sizeof(node_t)*nsize*(rs+DMIR)*5, hipMemcpyHostToDevice);
int threadsPerBlock = 256;
int blocksPerGrid = ((nsize) / threadsPerBlock)+1;
{
dim3 dimBlock(32,rs+DMIR);
dim3 dimGrid( (nsize / dimBlock.x)+1,1); //, A.height / dimBlock.y
blocksPerGrid = ((nsize*(rs+DMIR)) / threadsPerBlock)+1;
//buildCandidates_Feature_Kernel<<<dimGrid, dimBlock>>> (thrust::raw_pointer_cast(&candidates_dev[0]), bigtex_dev, src_dev, dnodes_dev, dem_lsizes_dev,nsize, src.getWidth(),src.getHeight(),bsize);
hipLaunchKernelGGL(( buildCandidates_Feature_Kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&candidates_dev[0]), bigtex_dev, src_dev, dnodes_dev, dem_lsizes_dev,nsize, src.getWidth(),src.getHeight(),bsize);
//hipMemcpy(candidates, candidates_dev, sizeof(cost_t)*nsize*rs, hipMemcpyDeviceToHost);
hipMemcpy(big.getPixels(), bigtex_dev, sizeof(float)*bigwidth*bigheight, hipMemcpyDeviceToHost);
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Elapsed time: "<<elapsed<<" s.\n";
cand_time += elapsed;
start_t = clock();
}
//big.savePGM("/tmp/bigtmp_Feature.pgm");
delete [] dem_lsizes;
hipFree(dem_lsizes_dev);
node_list usr_nodes = usr_features.processNodes;
cost_t* prev = new cost_t(node_t(-1,-1),0,0,0,0);
float* usr_var_dev; hipMalloc((void**) &usr_var_dev,sizeof(float)*NLEVEL);
node_t* uleafs_dev; hipMalloc((void**) &uleafs_dev,sizeof(node_t)*10);
cost_t* prev_dev; hipMalloc((void**) &prev_dev,sizeof(cost_t));
float* ucand_dev; hipMalloc((void**) &ucand_dev,bsize*bsize*sizeof(float));
float* utar_dev; hipMalloc((void**) &utar_dev,bsize*bsize*sizeof(float));
cout<<"Start matching feature patches! "<<usr_nodes.size()<<" from "<<nsize*(rs+DMIR)<<" candidates\n";
//return;
int c_act = nsize*(rs+DMIR);
int dx,dy;
blocksPerGrid = ((nsize*(rs+DMIR)) / threadsPerBlock)+1;
for (node_list::const_iterator it = usr_nodes.begin(); it != usr_nodes.end(); it++ )
{
//dy=dx;
s_tmp = clock();
node_t cnode = *it;
dx = cnode.x-bsize/2;
dy = cnode.y-bsize/2;
//cout<<"Leafs size: "<<usr_features.getcontrolpts(*it).size()<<endl;
//target.get_crop(dx,dy,dx+(bsize-1),dy+(bsize-1),0,0).savePGM("/tmp/cand_usr.pgm");
vector<node_t> uleafs = getChildren(cnode,usr_features,bsize);
vector<float> usr_var = noise_variances(tar_pyr,dx,dy,bsize);
int lsize = imin(uleafs.size(),10);
Image ucand = dest.get_crop(dx,dy,dx+bsize-1,dy+bsize-1);
Image utar = target.get_crop(dx,dy,dx+bsize-1,dy+bsize-1);
hipMemcpy(uleafs_dev, &uleafs[0], sizeof(node_t)*lsize, hipMemcpyHostToDevice);
hipMemcpy(usr_var_dev, &usr_var[0], sizeof(float)*NLEVEL, hipMemcpyHostToDevice);
hipMemcpy(ucand_dev, ucand.getPixels(), sizeof(float)*bsize*bsize, hipMemcpyHostToDevice);
hipMemcpy(utar_dev, utar.getPixels(), sizeof(float)*bsize*bsize, hipMemcpyHostToDevice);
hipMemcpy(prev_dev, prev, sizeof(cost_t), hipMemcpyHostToDevice);
//ComputeCosts_noFeature_Kernel<<<blocksPerGrid, threadsPerBlock>>>(ucand_dev, bigtex_dev,thrust::raw_pointer_cast(&candidates_dev[0]) , nsize*rs, dem_vars_dev, usr_var_dev, prev_dev, bsize);
//cout<<"Look for help\n";
hipLaunchKernelGGL(( ComputeCosts_Feature_Kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, ucand_dev, utar_dev, bigtex_dev,thrust::raw_pointer_cast(&candidates_dev[0]), nsize*(rs+DMIR), dem_vars_dev, dem_leafs_dev, usr_var_dev, uleafs_dev, prev_dev, bsize, lsize);
//cout<<"Got help\n";
thrust::sort(candidates_dev.begin(),candidates_dev.end(),comp);
//cout<<"What now\n";
//hipMemcpy(candidates, candidates_dev, sizeof(cost_t)*nsize*rs, hipMemcpyDeviceToHost);
Image patch = findCand_dev(dest,bigtex,candidates_dev,nsize*(rs+DMIR), prev, bsize, dx,dy);
//cout<<prev->org.x<<" "<<prev->org.y<<" "<<prev->rot<<" "<<prev->cost<<endl;
if (prev->rot<360){
Image patch2 = src.get_crop(prev->org.x,prev->org.y,prev->org.x+bsize-1,prev->org.y+bsize-1,prev->rot);
//patch2.savePGM("./tmp/res_cand2.pgm");
patch_merging(&dest, &patch2, dx, dy,1,bsize/10.);
}
else{
Image patch2 = src.get_crop(prev->org.x,prev->org.y,prev->org.x+bsize-1,prev->org.y+bsize-1,0,(prev->rot-360)/DROT+1);
//patch2.savePGM("./tmp/res_cand2.pgm");
patch_merging(&dest, &patch2, dx, dy,1,bsize/10.);
}
//patch.savePGM("./tmp/res_cand1.pgm");
//cin.get();
//hipMemcpy(prev_dev, prev, sizeof(cost_t), hipMemcpyHostToDevice);
e_tmp = clock();
match_time+=mstimer(s_tmp,e_tmp);
s_tmp = clock();
//patch_merging(&dest, &patch, dx, dy,1,bsize/10.);
e_tmp = clock();
paste_time+=mstimer(s_tmp,e_tmp);
//dest.savePGM("/tmp/res_tmp_cpu.pgm",dest.maxval);
//dest.saveTerragen("/tmp/res_tmp_cpu.ter");
//cin.get();
}
hipFree(usr_var_dev);
hipFree(uleafs_dev);
hipFree(prev_dev);
hipFree(ucand_dev);
hipFree(utar_dev);
delete prev;
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Elapsed time: "<<elapsed<<" s.\n";
//delete [] candidates;
delete [] dnodes;
delete [] dem_vars;
delete [] variances;
delete [] dem_leafs;
//hipFree(candidates_dev);
hipFree(dnodes_dev);
hipFree(dem_vars_dev);
hipFree(dem_leafs_dev);
hipFree(src_dev);
//hipFree(dest_dev);
//hipFree(tar_dev);
hipFree(bigtex_dev);
cerr<<"\n\n*********** Feature matching GPU*******************\n";
print_times();
cerr<<" Candidates set: "<<cand_time<<"s\n";
cerr<<" Number of targets: "<<usr_nodes.size()<<"\n";
cerr<<" Number of cands: "<<nsize*(rs+DMIR)<<"\n";
cerr<<"*********** End *******************\n\n";
}
| 27e99d3d95f421aba5e8008205b3f7037a0b066b.cu |
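// Accumulates squared height differences between the target patch 'dest' and the candidate stored
// at column cx of the coalesced bigtex buffer, sampled along the Bresenham line from p to q.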
__host__ __device__ float get_diff(float* dest, float* bigtex, int bsize, int cx, node_t p, node_t q)
{
//int alln = bigwidth/(bsize*bsize);
float sum=0;
int count=0;
int x0=p.x, y0=p.y;
int x1=q.x, y1=q.y;
bool steep = ( abs(y1 - y0) > abs(x1 - x0) );
if (steep)
{
swap(x0, y0);
swap(x1, y1);
}
if (x0 > x1)
{
swap(x0, x1);
swap(y0, y1);
}
int deltax = x1 - x0;
int deltay = abs(y1 - y0);
int error = deltax / 2 ;
int ystep;
int y = y0;
if (y0 < y1)
ystep = 1 ;
else ystep = -1;
for (int x=x0; x<=x1; x++)
{
if (steep)
{
int ide = y + x*bsize;
float val = dest[ide]-bigtex[cx+(ide*bigw)];
sum+= val*val;
}
else
{
int ide = x + y*bsize;
float val = dest[ide]-bigtex[cx+(ide*bigw)];
sum+= val*val;
}
count++;
error = error - deltay;
if (error < 0)
{
y = y + ystep;
error = error + deltax;
}
}
return sqrt(sum)/count;
}
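// Profile term: sums get_diff along the segments joining each branch point to the patch centre
// (a perpendicular cross-section is added when there are exactly two branches).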
__host__ __device__ float get_diff_profile(float* dest, float* bigtex, int bsize, int cx, node_t* cand_br1, int csize)
{
float total=0.;
int mw=bsize, mh=bsize;
node_t mid(mw/2,mh/2);
for (int k=0; k<csize; k++)
{
total += get_diff(dest,bigtex,bsize,cx,cand_br1[k],mid);
}
if (csize>2) return total;
if (csize==2)
{
node_t qnode1,qnode2;
qnode1 = cand_br1[0];
qnode2=cand_br1[1];
node_t vec(-(qnode1.y-qnode2.y)/2,(qnode1.x-qnode2.x)/2);
qnode1 = node_t(mid.x+vec.x,mid.y+vec.y);
qnode2 = node_t(mid.x-vec.x,mid.y-vec.y);
total += get_diff(dest,bigtex,bsize,cx,qnode1, mid);
total += get_diff(dest,bigtex,bsize,cx,mid, qnode2);
}
return total;
}
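// Sum of squared differences over the non-zero pixels of the target patch against the candidate
// stored at column cx of bigtex.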
__host__ __device__ float ssdf(float* dest, float* bigtex, int bsize, int cx)
{
float sum = 0.;
int count = 0;
//int ide = 0;
//int alln = bigwidth/(bsize*bsize);
for (int x=0; x<bsize*bsize; x++)
if ( dest[x]!=0.)
{
//count++;
float val = dest[x]-bigtex[cx+x*bigw];
sum+= val*val;
count++;
}
count++;
return sqrt(sum)/count;
}
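// Unpacks the candidate stored at column cx of the coalesced bigtex buffer into a bsize x bsize image.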
void getCand(Image& cand, float* bigtex, int cx)
{
for (int j=0; j<cand.height(); j++)
for (int i=0; i<cand.width(); i++){
//cout<<cx<<" "<<bigw<<" "<<bigtex[cx]<<endl;
cand(i,j)=bigtex[cx];
cx+=bigw;
}
//cand.savePGM("/tmp/cand_img.pgm"); cin.get();
}
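// Re-ranks the best KNUM pre-sorted candidates with the CPU graph-cut cost and returns the
// cheapest patch; the chosen candidate is also written to *prev.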
Image findCand(Image& dest, float* bigtex, int bigwidth, cost_t* candidates, int csize, cost_t* prev, int bsize, int dx, int dy)
{
vector<cost_t> choices;
//sort(candidates,candidates+csize);
int s = csize;
if (s>KNUM) s=KNUM;
for (int k=0; k<s; k++)
{
Image cand(bsize,bsize);
getCand(cand,bigtex,candidates[k].tnode.x);
candidates[k].cost = graphCut_cost(&dest,&cand,dx,dy);
choices.push_back(candidates[k]);
}
sort(choices.begin(),choices.end());
cost_t choice = choices[0];
Image res(bsize,bsize);
getCand(res,bigtex,choice.tnode.x);
*prev = choice;
return res;
}
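// Same re-ranking as findCand, but reads the top candidates from the GPU-sorted thrust device vector.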
Image findCand_dev(Image& dest, float* bigtex, thrust::device_vector<cost_t> candidates_dev, int csize, cost_t* prev, int bsize, int dx, int dy)
{
int s = csize;
if (s>KNUM) s=KNUM;
cost_t choice; float mini = 10*INF;
for (int k=0; k<s; k++)
{
Image cand(bsize,bsize);
cost_t cur = candidates_dev[k];
getCand(cand,bigtex,cur.tnode.x);
float cost = graphCut_cost(&dest,&cand,dx,dy);
if (cost<mini){
mini = cost;
choice = cur;
}
}
Image res(bsize,bsize);
getCand(res,bigtex,choice.tnode.x);
*prev = choice;
return res;
}
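// Matching cost for a non-feature patch: SSD term plus (optionally) the noise-variance difference.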
__host__ __device__ float getCost_noFeature(float* dest, float* bigtex, float* dem_vars, float* usr_var, cost_t dem, int bsize)
{
int cx = dem.tnode.x;
//int cy = dem.tnode.y;
float tmp = 10*ssdf(dest,bigtex,bsize,cx);
if (use_noisestat) tmp+=0.0001*compare_variances(usr_var,dem_vars,dem.vpos,NLEVEL);
return tmp;
}
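// One thread per candidate: fills candidates[k].cost for the current target patch; skipped
// candidates get INF.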
__global__ void ComputeCosts_noFeature_Kernel( float* dest, float* bigtex, cost_t* candidates, int csize, float* dem_vars, float* usr_var, cost_t* prev, int bsize){
//__shared__ float* dest = destg;
int k = blockDim.x * blockIdx.x + threadIdx.x;
if (k<csize){
if ( (!candidates[k].skip) && k>5)
{
candidates[k].cost = getCost_noFeature(dest, bigtex, dem_vars, usr_var, candidates[k], bsize);
}
else
{
candidates[k].cost = INF;
}
}
}
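// One thread per (source node, rotation/mirror): resamples the source patch and stores it
// column-wise in bigtex for coalesced access, recording its metadata in candidates[].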
__global__ void buildCandidates_noFeature_Kernel(cost_t* candidates, float* bigtex, float* src_ptr, node_t* dnodes,int nsize, int src_w, int src_h, int bsize){
int k = blockDim.x * blockIdx.x + threadIdx.x;
if (k<nsize*(360/DROT+ DMIR))
{
int kn = k%(nsize);
int rt = k/(nsize);
bigw = nsize*(360/DROT+ DMIR);
node_t pnode = dnodes[kn];
int rx = pnode.x;
int ry = pnode.y;
int mid = bsize/2;
int rs = (360/DROT+ DMIR);
int rot = rt*DROT;
cost_t c(node_t(rx,ry),0,rot,0,0);
{
int kpos = (kn*(rs))+rt;
c.vpos = kpos*3;
int cx = kpos; //for coalesced memory access
int cy = 0;
c.tnode = node_t(cx,cy);
c.skip = false;
for (int j=0; j<bsize; j++)
for (int i=0; i<bsize; i++)
{
//cout<<cx+i<<"/"<<cy+i<<": "<<cand(i,j)<<endl;
int ri = 0;
int rj = 0;
float candv=0;
if (rt<360/DROT){
float ni=0;
float nj=0;
ni = (rx+mid) + ((i - mid)*cos_int(rot)) + ((j - mid) * sin_int(rot));
nj = (ry+mid) - ((i - mid)*sin_int(rot)) + ((j - mid) * cos_int(rot));
if (ni>=0 && nj>=0 && ni<src_w && nj<src_h)
candv = cubicInterpol(src_ptr,src_w,src_h,ni,nj);
}
else if (rot==360/DROT){
ri = (rx+bsize-i-1);
rj = (ry+j);
if( ri>=0 && rj>=0 && ri<src_w && rj<src_h)
candv = src_ptr[ ri + rj*src_w ];
}
else {
ri = (rx+i);
rj = (ry+bsize-j-1);
if( ri>=0 && rj>=0 && ri<src_w && rj<src_h)
candv = src_ptr[ ri + rj*src_w ];
}
(bigtex)[cx] = candv;
cx+= (rs*nsize); //for coalesced memory access
if (candv<0.0001){
c.skip = false;
candidates[kpos] = (c);
//return;
}
}
candidates[kpos] = (c);
}
}
}
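// GPU driver for the non-feature pass: builds every rotated/mirrored candidate once, then
// repeatedly picks the next target region on the fill front and pastes the best-matching patch.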
void match_noFeature_bef(Terrain& dest, Image& src, Image& target, node_list dem_nodes, vector<Image>& tar_pyr, vector<Image>& src_pyr, int bsize, int osize)
{
clock_t start_t, end_t, s_tmp, e_tmp;
start_t = clock();
match_time = 0;
paste_time = 0;
get_target = 0;
int nsize = dem_nodes.size();
{
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Get pyramid elapsed time: "<<elapsed<<" s.\n";
start_t = clock();
}
float* variances = get_noise_stats_Feature(src_pyr, dem_nodes, bsize);
{
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Get noise stats elapsed time: "<<elapsed<<" s.\n";
start_t = clock();
}
node_t* dnodes = new node_t [nsize];
int rs = (360/DROT);
int bigwidth = nsize*bsize;
int bigheight = (rs+DMIR)*bsize;
bigw = nsize*(rs+DMIR);
Image big(bigwidth,bigheight);
float* bigtex = big.getPixels();
float* src_ptr = src.getPixels();
cost_t* candidates = new cost_t[nsize*(rs+DMIR)];
float* dem_vars = new float [nsize*(rs+DMIR)*3];
{
int count=0;
int rv = 0;
for (node_list::const_iterator it = dem_nodes.begin(); it != dem_nodes.end(); it++ ){
dnodes[count] = *it;
for (int rot=0; rot<360; rot+=DROT)
{
for (int k=0; k<NLEVEL; k++)
dem_vars[rv++] = variances[count*NLEVEL+k];
}
for (int m=0; m<DMIR; m++){
int mir = m+1;
for (int k=0; k<NLEVEL; k++)
dem_vars[rv++] = variances[count*NLEVEL+k];
}
count++;
}
}
{
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Elapsed time: "<<elapsed<<" s.\n";
start_t = clock();
}
float cand_time = 0;
cout<<"Prepare candidates! "<< nsize<<"\n";
float* src_dev; cudaMalloc((void**) &src_dev, sizeof(float)*src.getWidth()*src.getHeight());
float* bigtex_dev; cudaMalloc((void**) &bigtex_dev, sizeof(float)*bigwidth*bigheight);
float* dem_vars_dev; cudaMalloc((void**) &dem_vars_dev, sizeof(float)*nsize*(rs+DMIR)*3); // sized to match the nsize*(rs+DMIR)*3 host array copied in below
//cost_t* candidates_dev; cudaMalloc((void**) &candidates_dev, sizeof(cost_t)*nsize*rs);
node_t* dnodes_dev; cudaMalloc((void**) &dnodes_dev, sizeof(node_t)*nsize);
cudaMemcpy(src_dev, src.getPixels(), sizeof(float)*src.getWidth()*src.getHeight(), cudaMemcpyHostToDevice);
//cudaMemcpy(bigtex_dev, big.getPixels(), sizeof(float)*big.getWidth()*big.getHeight(), cudaMemcpyHostToDevice);
cudaMemcpy(dem_vars_dev, dem_vars, sizeof(float)*nsize*(rs+DMIR)*3, cudaMemcpyHostToDevice);
thrust::device_vector<cost_t> candidates_dev(nsize*(rs+DMIR));//cudaMemcpy(candidates_dev, candidates, sizeof(cost_t)*nsize*rs, cudaMemcpyHostToDevice);
cudaMemcpy(dnodes_dev, dnodes, sizeof(node_t)*nsize, cudaMemcpyHostToDevice);
//cout<<"Start matching non-feature\n"<<endl;
{
dim3 dimBlock(32,rs+DMIR);
dim3 dimGrid( (nsize / dimBlock.x)+1,1); //, A.height / dimBlock.y
int threadsPerBlock = 256;
int blocksPerGrid = ((nsize*(rs+DMIR)) / threadsPerBlock)+1;
cout<<blocksPerGrid<<" "<<threadsPerBlock<<endl;
//buildCandidates_noFeature_Kernel<<<dimGrid, dimBlock>>> (thrust::raw_pointer_cast(&candidates_dev[0]), bigtex_dev, src_dev, dnodes_dev,nsize, src.getWidth(),src.getHeight(),bsize);
buildCandidates_noFeature_Kernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&candidates_dev[0]), bigtex_dev, src_dev, dnodes_dev,nsize, src.getWidth(),src.getHeight(),bsize);
//cudaMemcpy(candidates, candidates_dev, sizeof(cost_t)*nsize*rs, cudaMemcpyDeviceToHost);
// Copy the packed candidate patches back: findCand_dev/getCand read them from the host-side bigtex.
cudaMemcpy(big.getPixels(), bigtex_dev, sizeof(float)*bigwidth*bigheight, cudaMemcpyDeviceToHost);
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Elapsed time: "<<elapsed<<" s.\n";
cand_time += elapsed;
start_t = clock();
}
//big.savePGM("/tmp/bigtmp_noFeature.pgm");
cost_t* prev = new cost_t(node_t(-1,-1),0,0,0,0);
int dx = -5*bsize, dy=-5*bsize;
int cnum = 0;
for (int k=0; k<nsize*(rs+DMIR); k++)
if (!candidates[k].skip)
cnum++;
cout<<"Start matching non-feature patches! from "<<cnum<<"\n";
//return;
float* usr_var_dev; cudaMalloc((void**) &usr_var_dev,sizeof(float)*NLEVEL);
cost_t* prev_dev; cudaMalloc((void **) &prev_dev,sizeof(cost_t));
float* ucand_dev; cudaMalloc((void **) &ucand_dev, sizeof(float)*bsize*bsize);
cnum = 0;
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
dim3 dimGrid( (dest.width() / dimBlock.x)+1,(dest.height() / dimBlock.y)+1); //, A.height / dimBlock.y
//cout<<dimGrid.x<<"/"<<dimGrid.y<<": "<<dimBlock.x<<"/"<<dimBlock.y<<endl;
int ide = 220 + 74*dest.width();
//cout<<ide<<" "<<cand_var[ide]<<endl;
list<cost_t> omega;
float* conf = new float [dest.width()*dest.height()];
for (int x=0; x<dest.width(); x++) for (int y=0; y<dest.height(); y++)
if (dest(x,y)>BG)
conf[x+y*dest.width()] = 1;
while (true)
{
int threadsPerBlock = 256;
int blocksPerGrid = ((nsize*(rs+DMIR)) / threadsPerBlock)+1;
s_tmp = clock();
//getNextTarget<<<dimGrid,dimBlock>>> (dest_dev,cand_var_dev,dest.width(), dest.height(), bsize, dx+bsize/2, dy+bsize/2);
//cudaMemcpy(cand_var, cand_var_dev, sizeof(float)*dest.getWidth()*dest.getHeight(), cudaMemcpyDeviceToHost);
getNextTarget2 (dest.getPixels(), conf, omega,dest.width(), dest.height(), bsize, dx+bsize/2, dy+bsize/2);
if (omega.size()==0) break;
//getNextTarget(dest.getPixels(), cand_var, dest.width(), dest.height(), bsize, dx+bsize/2, dy+bsize/2);
//cout<<ide<<" "<<cand_var[ide]<<endl;
//cin.get();
int maxi = -1;
int xmaxi = -1;
int ymaxi = -1;
float maxv = -INF, tmp;
for (list<cost_t>::iterator it=omega.begin(); it!=omega.end(); it++){
tmp = (*it).cost;
if (tmp>maxv){
node_t p = (*it).org;
maxv = tmp;
xmaxi = p.x;
ymaxi = p.y;
}
}
//cout<<xmaxi<<"/"<<ymaxi<<" --> "<<maxv<<endl;
e_tmp = clock();
get_target+=mstimer(s_tmp,e_tmp);
//if (xmaxi<0) break;
//dy=(maxi/dest.width())-bsize/2;
//dx=(maxi%dest.width())-bsize/2;
dx=(xmaxi)-bsize/2;
dy=(ymaxi)-bsize/2;
s_tmp = clock();
vector<float> usr_var = noise_variances(tar_pyr,dx,dy,bsize);
Image ucand = dest.get_crop(dx,dy,dx+bsize-1,dy+bsize-1);
cudaMemcpy(usr_var_dev, &usr_var[0], sizeof(float)*NLEVEL, cudaMemcpyHostToDevice);
cudaMemcpy(ucand_dev, ucand.getPixels(), bsize*bsize*sizeof(float), cudaMemcpyHostToDevice);
ComputeCosts_noFeature_Kernel<<<blocksPerGrid, threadsPerBlock>>>(ucand_dev, bigtex_dev,thrust::raw_pointer_cast(&candidates_dev[0]) , nsize*(rs+DMIR), dem_vars_dev, usr_var_dev, prev_dev, bsize);
thrust::sort(candidates_dev.begin(),candidates_dev.end(),comp);
//cudaMemcpy(candidates, candidates_dev, sizeof(cost_t)*nsize*rs, cudaMemcpyDeviceToHost);
//Image patch = findCand(dest,bigtex,bigwidth,candidates,nsize*rs, prev,bsize, dx,dy);
Image patch = findCand_dev(dest,bigtex,candidates_dev,nsize*(rs+DMIR), prev,bsize,dx,dy);
cudaMemcpy(prev_dev, prev, sizeof(cost_t), cudaMemcpyHostToDevice);
e_tmp = clock();
match_time+=mstimer(s_tmp,e_tmp);
s_tmp = clock();
paste_patch(dest,patch, bsize/10, dx, dy);
//dest.savePGM("/tmp/res_tmp_gpu.pgm");
//cudaMemcpy(dest_dev, dest.getPixels(), sizeof(float)*dest.width()*dest.height(), cudaMemcpyHostToDevice);
e_tmp = clock();
paste_time+=mstimer(s_tmp,e_tmp);
for (int i=0; i<bsize ;i++) for (int j=0; j<bsize ;j++)
if (xmaxi+i>=0 && ymaxi+j>=0 && xmaxi+i<dest.width() && ymaxi+j<dest.height() && dest(xmaxi+i,ymaxi+j)<=BG)
conf[(xmaxi+i)+(ymaxi+j)*dest.width()] = conf[xmaxi+ymaxi*dest.width()];
cnum++;
}
cout<<"Number of targets: "<<cnum<<endl;
delete [] conf;
{
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Elapsed time: "<<elapsed<<" s.\n";
start_t = clock();
}
int threadsPerBlock = 256;
int blocksPerGrid = ((nsize*(rs+DMIR)) / threadsPerBlock)+1;
bool havdata = false;
for (int id = 0; id<dest.height()*dest.width(); id++)
if (dest.getPixels()[id]>BG){
havdata = true;
break;
}
if (cnum==0){
cout<<"Number of targets: "<<cnum<<endl;
while (true){
bool finish = true;
for (int y = 0; y<dest.height(); y++)
for (int x = 0; x<dest.width(); x++)
if (dest(x,y)<=BG && ( (!havdata) || onBoundary(dest.getPixels(),dest.width(),dest.height(),x,y)))
{
//dy=dx;
finish = false;
dx = x-bsize/2;
dy = y-bsize/2;
s_tmp = clock();
vector<float> usr_var = noise_variances(tar_pyr,dx,dy,bsize);
Image ucand = dest.get_crop(dx,dy,dx+bsize-1,dy+bsize-1);
cudaMemcpy(usr_var_dev, &usr_var[0], sizeof(float)*NLEVEL, cudaMemcpyHostToDevice);
cudaMemcpy(prev_dev, prev, sizeof(cost_t), cudaMemcpyHostToDevice);
cudaMemcpy(ucand_dev, ucand.getPixels(), bsize*bsize*sizeof(float), cudaMemcpyHostToDevice);
ComputeCosts_noFeature_Kernel<<<blocksPerGrid, threadsPerBlock>>>(ucand_dev, bigtex_dev,thrust::raw_pointer_cast(&candidates_dev[0]) , nsize*(rs+DMIR), dem_vars_dev, usr_var_dev, prev_dev, bsize);
thrust::sort(candidates_dev.begin(),candidates_dev.end(),comp);
//cudaMemcpy(candidates, candidates_dev, sizeof(cost_t)*nsize*rs, cudaMemcpyDeviceToHost);
//Image patch = findCand(dest,bigtex,bigwidth,candidates,nsize*rs, prev,bsize, dx,dy);
Image patch = findCand_dev(dest,bigtex,candidates_dev,nsize*(rs+DMIR), prev,bsize,dx,dy);
cudaMemcpy(prev_dev, prev, sizeof(cost_t), cudaMemcpyHostToDevice);
e_tmp = clock();
match_time+=mstimer(s_tmp,e_tmp);
s_tmp = clock();
paste_patch(dest,patch, bsize/10, dx, dy);
//dest.savePGM("/tmp/res_tmp_gpu.pgm");
//cudaMemcpy(dest_dev, dest.getPixels(), sizeof(float)*dest.width()*dest.height(), cudaMemcpyHostToDevice);
e_tmp = clock();
paste_time+=mstimer(s_tmp,e_tmp);
cnum++;
//dest.savePGM("/tmp/tmp.pgm");
}
if (finish) break;
}
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Elapsed time: "<<elapsed<<" s.\n";
}
cudaFree(usr_var_dev);
cudaFree(prev_dev);
delete prev;
cudaFree(ucand_dev);
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Elapsed time: "<<elapsed<<" s.\n";
delete [] candidates;
delete [] dnodes;
delete [] dem_vars;
delete [] variances;
//cudaFree(candidates_dev);
cudaFree(dnodes_dev);
cudaFree(dem_vars_dev);
cudaFree(src_dev);
cudaFree(bigtex_dev);
cerr<<"\n\n*********** Non Feature matching GPU*******************\n";
print_times();
cerr<<" Candidates set: "<<cand_time<<"s\n";
cerr<<" Number of targets: "<<cnum<<"\n";
cerr<<" Number of cands: "<<nsize*(rs+DMIR)<<"\n";
cerr<<"*********** End *******************\n\n";
}
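// Matching cost for a feature (ridge/valley) patch: weighted sum of the noise-variance,
// branch-angle, profile and SSD terms that are enabled by the use_* flags.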
__host__ __device__ float getCost_Feature(float* dest, float* target, float* bigtex, float* dem_vars, node_t* dem_leafs, float* usr_var, node_t* uleafs, cost_t dem, int bsize)
{
float tmp = 0.;
int cx = dem.tnode.x;
int cy = dem.tnode.y;
//Image cand = bigtex.get_crop(dem.tnode.x,dem.tnode.y,dem.tnode.x+(bsize-1),dem.tnode.y+(bsize-1));
//if (use_bend && usr.candleafs.size()>2) tmp+= 100*tps_img(cand,uleafs,dem_leafs,dem.lpos,bsize,dem.lsize);
//if (use_cut) tmp+=graphCut_cost(&dest,&cand,dx,dy);
//if (use_bend && dem.lsize>=3) tmp+= 1000*get_tps(uleafs,dem_leafs,dem.lpos,bsize,dem.lsize) ;
if (use_noisestat) tmp+=0.001*compare_variances(usr_var,dem_vars,dem.vpos,NLEVEL);
if (use_angle) tmp+= 2*getDiffAng(uleafs,dem_leafs,dem.lpos,bsize,dem.lsize);
//cout<<"hi\n";
if (use_profile) tmp+= 5*get_diff_profile(target,bigtex,bsize,cx,uleafs,dem.lsize);
//cout<<"hey\n";
if (use_ssd) tmp+= ssdf(dest,bigtex,bsize,cx);
/*float t = ssdf(dest,bigtex,dsize,bsize,dx,dy,cx,cy);
if (t>0){
cout<<0.01*compare_variances(usr_var,dem_vars,dem.vpos,NLEVEL)<<endl;
cout<<getDiffAng(uleafs,dem_leafs,dem.lpos,bsize,dem.lsize)<<endl;
cout<<get_diff_profile(target,bigtex,bsize,dx,dy,cx,cy,uleafs,dem.lsize)<<endl;
cout<<t/bsize<<endl;
}*/
return tmp;
}
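// One thread per candidate: fills candidates[k].cost with getCost_Feature when the candidate
// has the same branch count as the target, INF otherwise.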
__global__ void ComputeCosts_Feature_Kernel( float* dest, float* target, float* bigtex, cost_t* candidates, int csize, float* dem_vars, node_t* dem_leafs, float* usr_var, node_t* uleafs, cost_t* prev, int bsize, int lsize){
int k = blockDim.x * blockIdx.x + threadIdx.x;
if (k<csize){
if ( (!candidates[k].skip) && lsize==candidates[k].lsize && k>5)
{
candidates[k].cost = getCost_Feature(dest, target, bigtex, dem_vars, dem_leafs, usr_var, uleafs, candidates[k], bsize);
}
else
{
candidates[k].cost = INF;
}
}
}
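// One thread per (feature node, rotation/mirror): resamples the source patch into the coalesced
// bigtex buffer and records its branch/variance metadata in candidates[].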
__global__ void buildCandidates_Feature_Kernel(cost_t* candidates, float* bigtex, float* src_ptr, node_t* dnodes, node_t* dem_lsizes, int nsize, int src_w, int src_h, int bsize){
int k = blockDim.x * blockIdx.x + threadIdx.x;
if (k<nsize*(360/DROT+ DMIR))
{
int rs = (360/DROT+DMIR);
int kn = k/(rs);
int rt = k%(rs);
bigw = nsize*(360/DROT+ DMIR);
node_t cnode = dnodes[kn];
int mid = bsize/2;
int rx = cnode.x-bsize/2;
int ry = cnode.y-bsize/2;
int rot = rt*DROT;
cost_t c(node_t(rx,ry),0,rot,0,0);
{
//cout<<"hi1\n";
c.cnode = dnodes[kn];
//cout<<"hi2\n";
int kpos = (kn*rs)+rt;
c.lpos = dem_lsizes[kpos].x;
c.lsize = dem_lsizes[kpos].y;
c.vpos = kpos*3;
int cx = kpos; //coalesced memory access
c.tnode = node_t(cx,0);
c.skip =false;
for (int j=0; j<bsize; j++)
for (int i=0; i<bsize; i++)
{
//cout<<cx+i<<"/"<<cy+i<<": "<<cand(i,j)<<endl;
int ri = 0;
int rj = 0;
float candv=0;
if (rt<360/DROT){
float ni=0;
float nj=0;
ni = (rx+mid) + ((i - mid)*cos_int(rot)) + ((j - mid) * sin_int(rot));
nj = (ry+mid) - ((i - mid)*sin_int(rot)) + ((j - mid) * cos_int(rot));
if (ni>=0 && nj>=0 && ni<src_w && nj<src_h)
candv = cubicInterpol(src_ptr,src_w,src_h,ni,nj);
}
else if (rot==360/DROT){
ri = (rx+bsize-i-1);
rj = (ry+j);
if( ri>=0 && rj>=0 && ri<src_w && rj<src_h)
candv = src_ptr[ ri + rj*src_w ];
}
else {
ri = (rx+i);
rj = (ry+bsize-j-1);
if( ri>=0 && rj>=0 && ri<src_w && rj<src_h)
candv = src_ptr[ ri + rj*src_w ];
}
(bigtex)[cx] = candv;
cx += bigw; //coalesced memory access
if (candv<0.0001) c.skip = true;
}
candidates[kpos] = (c);
}
}
}
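// GPU driver for the feature pass: builds every rotated/mirrored candidate around the extracted
// feature nodes, then matches and merges one patch per user feature node.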
void match_Feature_bef(Terrain& dest, Tree& usr_features, Tree& dem_features, vector<Image>& tar_pyr, vector<Image>& src_pyr, int bsize)
{
clock_t start_t, end_t, s_tmp, e_tmp;
start_t = clock();
match_time = 0;
paste_time = 0;
get_target = 0;
Image target = usr_features.msource;
Image src = dem_features.msource;
node_list dem_nodes = dem_features.processNodes;
int nsize = dem_nodes.size();
int rs = 360/DROT;
node_t* dnodes = new node_t [nsize];
{
int count=0;
for (node_list::const_iterator it = dem_nodes.begin(); it != dem_nodes.end(); it++ ){
dnodes[count] = *it;
count++;
}
}
int bigwidth = nsize*(rs+DMIR)*bsize*bsize;
int bigheight = 1;
bigw = nsize*(rs+DMIR);
Image big(bigwidth,bigheight);
float* bigtex = big.getPixels();
float* src_ptr = src.getPixels();
{
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Get pyramid elapsed time: "<<elapsed<<" s.\n";
start_t = clock();
}
float* variances = get_noise_stats_Feature(src_pyr, dem_nodes, bsize);
{
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Get noise stats elapsed time: "<<elapsed<<" s.\n";
start_t = clock();
}
//float* variances = get_noise_stats_Feature(src_pyr, dem_nodes, bsize);
//cost_t* candidates = new cost_t[nsize*rs];
float* dem_vars = new float [nsize*(rs+DMIR)*3];
node_t* dem_leafs = new node_t [nsize*(rs+DMIR)*10];
node_t* dem_lsizes = new node_t [nsize*(rs+DMIR)];
{
int rt = 0;
int rl = 0;
int rv = 0;
int cl=0;
for (int kn=0; kn<nsize; kn++)
{
node_t cnode = dnodes[kn];
for (int rot=0; rot<360; rot+=DROT)
{
vector<node_t> candleafs = getChildren(cnode,dem_features,bsize,rot);
dem_lsizes[rl].x = cl;
dem_lsizes[rl].y = candleafs.size();
cl += dem_lsizes[rl].y;
rl++;
for (unsigned int k=0; k<candleafs.size(); k++)
dem_leafs[rt++]=candleafs[k];
for (int k=0; k<NLEVEL; k++)
dem_vars[rv++] = variances[kn*NLEVEL+k];
}
for (int m=0; m<DMIR; m++){
int mir = m+1;
vector<node_t> candleafs = getChildren(cnode,dem_features,bsize,0,mir);
dem_lsizes[rl].x = cl;
dem_lsizes[rl].y = candleafs.size();
cl += dem_lsizes[rl].y;
rl++;
for (unsigned int k=0; k<candleafs.size(); k++)
dem_leafs[rt++]=candleafs[k];
for (int k=0; k<NLEVEL; k++)
dem_vars[rv++] = variances[kn*NLEVEL+k];
}
}
}
{
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Elapsed time: "<<elapsed<<" s.\n";
start_t = clock();
}
float cand_time = 0;
cout<<"Prepare candidates! "<< nsize<<"\n";
start_t = clock();
float* src_dev; cudaMalloc((void**) &src_dev, sizeof(float)*src.getWidth()*src.getHeight());
//float* dest_dev; cudaMalloc((void**) &dest_dev, sizeof(float)*dest.getWidth()*dest.getHeight());
//float* tar_dev; cudaMalloc((void**) &tar_dev, sizeof(float)*target.getWidth()*target.getHeight());
float* bigtex_dev; cudaMalloc((void**) &bigtex_dev, sizeof(float)*bigwidth*bigheight);
float* dem_vars_dev; cudaMalloc((void**) &dem_vars_dev, sizeof(float)*nsize*(rs+DMIR)*3);
thrust::device_vector<cost_t> candidates_dev(nsize*(rs+DMIR));
//cost_t* candidates_dev; cudaMalloc((void**) &candidates_dev, sizeof(cost_t)*nsize*rs);
//cost_t* candidates_dev;
//cudaMalloc((void**) &candidates_dev, sizeof)
node_t* dnodes_dev; cudaMalloc((void**) &dnodes_dev, sizeof(node_t)*nsize);
node_t* dem_lsizes_dev; cudaMalloc((void**) &dem_lsizes_dev, sizeof(node_t)*nsize*(rs+DMIR));
node_t* dem_leafs_dev; cudaMalloc((void**) &dem_leafs_dev, sizeof(node_t)*nsize*(rs+DMIR)*5);
cudaMemcpy(src_dev, src.getPixels(), sizeof(float)*src.getWidth()*src.getHeight(), cudaMemcpyHostToDevice);
// cudaMemcpy(dest_dev, dest.getPixels(), sizeof(float)*dest.getWidth()*dest.getHeight(), cudaMemcpyHostToDevice);
// cudaMemcpy(tar_dev, target.getPixels(), sizeof(float)*target.getWidth()*target.getHeight(), cudaMemcpyHostToDevice);
cudaMemcpy(bigtex_dev, big.getPixels(), sizeof(float)*big.getWidth()*big.getHeight(), cudaMemcpyHostToDevice);
cudaMemcpy(dem_vars_dev, dem_vars, sizeof(float)*nsize*(rs+DMIR)*3, cudaMemcpyHostToDevice);
//cudaMemcpy(candidates_dev, candidates, sizeof(cost_t)*nsize*rs, cudaMemcpyHostToDevice);
cudaMemcpy(dnodes_dev, dnodes, sizeof(node_t)*nsize, cudaMemcpyHostToDevice);
cudaMemcpy(dem_lsizes_dev, dem_lsizes, sizeof(node_t)*nsize*(rs+DMIR), cudaMemcpyHostToDevice);
cudaMemcpy(dem_leafs_dev, dem_leafs, sizeof(node_t)*nsize*(rs+DMIR)*5, cudaMemcpyHostToDevice);
int threadsPerBlock = 256;
int blocksPerGrid = ((nsize) / threadsPerBlock)+1;
{
dim3 dimBlock(32,rs+DMIR);
dim3 dimGrid( (nsize / dimBlock.x)+1,1); //, A.height / dimBlock.y
blocksPerGrid = ((nsize*(rs+DMIR)) / threadsPerBlock)+1;
//buildCandidates_Feature_Kernel<<<dimGrid, dimBlock>>> (thrust::raw_pointer_cast(&candidates_dev[0]), bigtex_dev, src_dev, dnodes_dev, dem_lsizes_dev,nsize, src.getWidth(),src.getHeight(),bsize);
buildCandidates_Feature_Kernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&candidates_dev[0]), bigtex_dev, src_dev, dnodes_dev, dem_lsizes_dev,nsize, src.getWidth(),src.getHeight(),bsize);
//cudaMemcpy(candidates, candidates_dev, sizeof(cost_t)*nsize*rs, cudaMemcpyDeviceToHost);
cudaMemcpy(big.getPixels(), bigtex_dev, sizeof(float)*bigwidth*bigheight, cudaMemcpyDeviceToHost);
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Elapsed time: "<<elapsed<<" s.\n";
cand_time += elapsed;
start_t = clock();
}
//big.savePGM("/tmp/bigtmp_Feature.pgm");
delete [] dem_lsizes;
cudaFree(dem_lsizes_dev);
node_list usr_nodes = usr_features.processNodes;
cost_t* prev = new cost_t(node_t(-1,-1),0,0,0,0);
float* usr_var_dev; cudaMalloc((void**) &usr_var_dev,sizeof(float)*NLEVEL);
node_t* uleafs_dev; cudaMalloc((void**) &uleafs_dev,sizeof(node_t)*10);
cost_t* prev_dev; cudaMalloc((void**) &prev_dev,sizeof(cost_t));
float* ucand_dev; cudaMalloc((void**) &ucand_dev,bsize*bsize*sizeof(float));
float* utar_dev; cudaMalloc((void**) &utar_dev,bsize*bsize*sizeof(float));
cout<<"Start matching feature patches! "<<usr_nodes.size()<<" from "<<nsize*(rs+DMIR)<<" candidates\n";
//return;
int c_act = nsize*(rs+DMIR);
int dx,dy;
blocksPerGrid = ((nsize*(rs+DMIR)) / threadsPerBlock)+1;
for (node_list::const_iterator it = usr_nodes.begin(); it != usr_nodes.end(); it++ )
{
//dy=dx;
s_tmp = clock();
node_t cnode = *it;
dx = cnode.x-bsize/2;
dy = cnode.y-bsize/2;
//cout<<"Leafs size: "<<usr_features.getcontrolpts(*it).size()<<endl;
//target.get_crop(dx,dy,dx+(bsize-1),dy+(bsize-1),0,0).savePGM("/tmp/cand_usr.pgm");
vector<node_t> uleafs = getChildren(cnode,usr_features,bsize);
vector<float> usr_var = noise_variances(tar_pyr,dx,dy,bsize);
int lsize = imin(uleafs.size(),10);
Image ucand = dest.get_crop(dx,dy,dx+bsize-1,dy+bsize-1);
Image utar = target.get_crop(dx,dy,dx+bsize-1,dy+bsize-1);
cudaMemcpy(uleafs_dev, &uleafs[0], sizeof(node_t)*lsize, cudaMemcpyHostToDevice);
cudaMemcpy(usr_var_dev, &usr_var[0], sizeof(float)*NLEVEL, cudaMemcpyHostToDevice);
cudaMemcpy(ucand_dev, ucand.getPixels(), sizeof(float)*bsize*bsize, cudaMemcpyHostToDevice);
cudaMemcpy(utar_dev, utar.getPixels(), sizeof(float)*bsize*bsize, cudaMemcpyHostToDevice);
cudaMemcpy(prev_dev, prev, sizeof(cost_t), cudaMemcpyHostToDevice);
//ComputeCosts_noFeature_Kernel<<<blocksPerGrid, threadsPerBlock>>>(ucand_dev, bigtex_dev,thrust::raw_pointer_cast(&candidates_dev[0]) , nsize*rs, dem_vars_dev, usr_var_dev, prev_dev, bsize);
//cout<<"Look for help\n";
ComputeCosts_Feature_Kernel<<<blocksPerGrid, threadsPerBlock>>>(ucand_dev, utar_dev, bigtex_dev,thrust::raw_pointer_cast(&candidates_dev[0]), nsize*(rs+DMIR), dem_vars_dev, dem_leafs_dev, usr_var_dev, uleafs_dev, prev_dev, bsize, lsize);
//cout<<"Got help\n";
thrust::sort(candidates_dev.begin(),candidates_dev.end(),comp);
//cout<<"What now\n";
//cudaMemcpy(candidates, candidates_dev, sizeof(cost_t)*nsize*rs, cudaMemcpyDeviceToHost);
Image patch = findCand_dev(dest,bigtex,candidates_dev,nsize*(rs+DMIR), prev, bsize, dx,dy);
//cout<<prev->org.x<<" "<<prev->org.y<<" "<<prev->rot<<" "<<prev->cost<<endl;
if (prev->rot<360){
Image patch2 = src.get_crop(prev->org.x,prev->org.y,prev->org.x+bsize-1,prev->org.y+bsize-1,prev->rot);
//patch2.savePGM("./tmp/res_cand2.pgm");
patch_merging(&dest, &patch2, dx, dy,1,bsize/10.);
}
else{
Image patch2 = src.get_crop(prev->org.x,prev->org.y,prev->org.x+bsize-1,prev->org.y+bsize-1,0,(prev->rot-360)/DROT+1);
//patch2.savePGM("./tmp/res_cand2.pgm");
patch_merging(&dest, &patch2, dx, dy,1,bsize/10.);
}
//patch.savePGM("./tmp/res_cand1.pgm");
//cin.get();
//cudaMemcpy(prev_dev, prev, sizeof(cost_t), cudaMemcpyHostToDevice);
e_tmp = clock();
match_time+=mstimer(s_tmp,e_tmp);
s_tmp = clock();
//patch_merging(&dest, &patch, dx, dy,1,bsize/10.);
e_tmp = clock();
paste_time+=mstimer(s_tmp,e_tmp);
//dest.savePGM("/tmp/res_tmp_cpu.pgm",dest.maxval);
//dest.saveTerragen("/tmp/res_tmp_cpu.ter");
//cin.get();
}
cudaFree(usr_var_dev);
cudaFree(uleafs_dev);
cudaFree(prev_dev);
cudaFree(ucand_dev);
cudaFree(utar_dev);
delete prev;
end_t = clock();
float elapsed = ((float)( end_t - start_t )) /CLOCKS_PER_SEC;
cout<<"Elapsed time: "<<elapsed<<" s.\n";
//delete [] candidates;
delete [] dnodes;
delete [] dem_vars;
delete [] variances;
delete [] dem_leafs;
//cudaFree(candidates_dev);
cudaFree(dnodes_dev);
cudaFree(dem_vars_dev);
cudaFree(dem_leafs_dev);
cudaFree(src_dev);
//cudaFree(dest_dev);
//cudaFree(tar_dev);
cudaFree(bigtex_dev);
cerr<<"\n\n*********** Feature matching GPU*******************\n";
print_times();
cerr<<" Candidates set: "<<cand_time<<"s\n";
cerr<<" Number of targets: "<<usr_nodes.size()<<"\n";
cerr<<" Number of cands: "<<nsize*(rs+DMIR)<<"\n";
cerr<<"*********** End *******************\n\n";
}
|
d6d282acdfd78b1300cedc65b26534ca329829a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
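// Element-wise hyperbolic cosine: y[offset_y + i*stride_y] = cosh(x[offset_x + i*stride_x]) for i in [0, n).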
__global__ void vector_cosh (const int n, const REAL* x, const int offset_x, const int stride_x, REAL* y, const int offset_y, const int stride_y) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < n) {
y[offset_y + gid * stride_y] = CAST(cosh)(x[offset_x + gid * stride_x]);
}
} | d6d282acdfd78b1300cedc65b26534ca329829a9.cu | #include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
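// Element-wise hyperbolic cosine: y[offset_y + i*stride_y] = cosh(x[offset_x + i*stride_x]) for i in [0, n).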
__global__ void vector_cosh (const int n, const REAL* x, const int offset_x, const int stride_x, REAL* y, const int offset_y, const int stride_y) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < n) {
y[offset_y + gid * stride_y] = CAST(cosh)(x[offset_x + gid * stride_x]);
}
} |
f01e61cdb754207121697d13b5e71173004e1480.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "matrixmul.h"
#define TILE_WIDTH 16
/* #define TOTAL_LEN 64000 */
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P)
{
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
float Pvalue = 0.0;
for(unsigned i = 0; i < (int)(ceil((float)M.width / TILE_WIDTH)); ++i) {
if(i * TILE_WIDTH + threadIdx.x < M.width && row < M.height)
Mds[threadIdx.y][threadIdx.x] = M.elements[row * M.width + i * TILE_WIDTH + threadIdx.x];
else
Mds[threadIdx.y][threadIdx.x] = 0.0;
if(i * TILE_WIDTH + threadIdx.y < N.height && col < N.width)
Nds[threadIdx.y][threadIdx.x] = N.elements[(i * TILE_WIDTH + threadIdx.y) * N.width + col];
else
Nds[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for(unsigned j = 0; j < TILE_WIDTH; ++j) {
Pvalue += Mds[threadIdx.y][j] * Nds[j][threadIdx.x];
}
// Wait until every thread has finished with this tile before it is overwritten.
__syncthreads();
}
if(row < P.height && col < P.width)
P.elements[row*P.width + col] = Pvalue;
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
| f01e61cdb754207121697d13b5e71173004e1480.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "matrixmul.h"
#define TILE_WIDTH 16
/* #define TOTAL_LEN 64000 */
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P)
{
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
float Pvalue = 0.0;
for(unsigned i = 0; i < (int)(ceil((float)M.width / TILE_WIDTH)); ++i) {
if(i * TILE_WIDTH + threadIdx.x < M.width && row < M.height)
Mds[threadIdx.y][threadIdx.x] = M.elements[row * M.width + i * TILE_WIDTH + threadIdx.x];
else
Mds[threadIdx.y][threadIdx.x] = 0.0;
if(i * TILE_WIDTH + threadIdx.y < N.height && col < N.width)
Nds[threadIdx.y][threadIdx.x] = N.elements[(i * TILE_WIDTH + threadIdx.y) * N.width + col];
else
Nds[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for(unsigned j = 0; j < TILE_WIDTH; ++j) {
Pvalue += Mds[threadIdx.y][j] * Nds[j][threadIdx.x];
}
// Wait until every thread has finished with this tile before it is overwritten.
__syncthreads();
}
if(row < P.height && col < P.width)
P.elements[row*P.width + col] = Pvalue;
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
|
e34092c60e6a37439ced48e83e81ff717315aca8.hip | // !!! This is a file automatically generated by hipify!!!
/*
* CUDA blur
* Kevin Yuh, 2014
* Revised by Nailen Matschke, 2016
* Revised by Loko Kung, 2018
*/
#include "blur.cuh"
#include <cstdio>
#include <hip/hip_runtime.h>
#include "cuda_header.cuh"
/*
* NOTE: You can use this macro to easily check cuda error codes
* and get more information.
*
* Modified from:
* http://stackoverflow.com/questions/14038589/
* what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
*/
#define gpu_errchk(ans) { gpu_assert((ans), __FILE__, __LINE__); }
inline void gpu_assert(hipError_t code, const char *file, int line,
bool abort = true) {
if (code != hipSuccess) {
fprintf(stderr, "gpu_assert: %s %s %d\n",
hipGetErrorString(code), file, line);
exit(code);
}
}
CUDA_CALLABLE
void cuda_blur_kernel_convolution(uint thread_index, const float* gpu_raw_data,
const float* gpu_blur_v, float* gpu_out_data,
const unsigned int n_frames,
const unsigned int blur_v_size) {
// TODO: Implement the necessary convolution function that should be
// completed for each thread_index. Use the CPU implementation in
// blur.cpp as a reference.
if (thread_index < blur_v_size) {
for (int j = 0; j <= thread_index; j++) {
gpu_out_data[thread_index] += gpu_raw_data[thread_index - j] * gpu_blur_v[j];
}
} else {
for (int j = 0; j < blur_v_size; j++) {
gpu_out_data[thread_index] += gpu_raw_data[thread_index - j] * gpu_blur_v[j];
}
}
}
__global__
void cuda_blur_kernel(const float *gpu_raw_data, const float *gpu_blur_v,
float *gpu_out_data, int n_frames, int blur_v_size) {
// TODO: Compute the current thread index.
uint thread_index = blockDim.x * blockIdx.x + threadIdx.x;
uint total_threads = gridDim.x * blockDim.x;
// TODO: Update the while loop to handle all indices for this thread.
// Remember to advance the index as necessary.
while (thread_index < n_frames) {
// Do computation for this thread index
cuda_blur_kernel_convolution(thread_index, gpu_raw_data,
gpu_blur_v, gpu_out_data,
n_frames, blur_v_size);
// TODO: Update the thread index
thread_index += total_threads;
}
}
float cuda_call_blur_kernel(const unsigned int blocks,
const unsigned int threads_per_block,
const float *raw_data,
const float *blur_v,
float *out_data,
const unsigned int n_frames,
const unsigned int blur_v_size) {
// Use the CUDA machinery for recording time
hipEvent_t start_gpu, stop_gpu;
float time_milli = -1;
hipEventCreate(&start_gpu);
hipEventCreate(&stop_gpu);
hipEventRecord(start_gpu);
// TODO: Allocate GPU memory for the raw input data (either audio file
// data or randomly generated data. The data is of type float and
// has n_frames elements. Then copy the data in raw_data into the
// GPU memory you allocated.
float* gpu_raw_data;
gpu_errchk(hipMalloc(&gpu_raw_data, sizeof(float) * n_frames));
gpu_errchk(hipMemcpy(gpu_raw_data, raw_data, sizeof(float) * n_frames, hipMemcpyHostToDevice));
// TODO: Allocate GPU memory for the impulse signal (for now global GPU
// memory is fine. The data is of type float and has blur_v_size
// elements. Then copy the data in blur_v into the GPU memory you
// allocated.
float* gpu_blur_v;
gpu_errchk(hipMalloc(&gpu_blur_v, sizeof(float) * blur_v_size));
gpu_errchk(hipMemcpy(gpu_blur_v, blur_v, sizeof(float) * blur_v_size, hipMemcpyHostToDevice));
// TODO: Allocate GPU memory to store the output audio signal after the
// convolution. The data is of type float and has n_frames elements.
// Initialize the data as necessary.
float* gpu_out_data;
    gpu_errchk(hipMalloc(&gpu_out_data, sizeof(float) * n_frames));
    // The convolution accumulates with +=, so the output buffer must start zeroed.
    gpu_errchk(hipMemset(gpu_out_data, 0, sizeof(float) * n_frames));
// TODO: Appropriately call the kernel function.
hipLaunchKernelGGL(( cuda_blur_kernel), dim3(blocks), dim3(threads_per_block), 0, 0, gpu_raw_data, gpu_blur_v, gpu_out_data, n_frames, blur_v_size);
// Check for errors on kernel call
hipError_t err = hipGetLastError();
if (hipSuccess != err)
fprintf(stderr, "Error %s\n", hipGetErrorString(err));
else
fprintf(stderr, "No kernel error detected\n");
// TODO: Now that kernel calls have finished, copy the output signal
// back from the GPU to host memory. (We store this channel's result
// in out_data on the host.)
gpu_errchk(hipMemcpy(out_data, gpu_out_data, sizeof(float) * n_frames, hipMemcpyDeviceToHost));
// TODO: Now that we have finished our computations on the GPU, free the
// GPU resources.
gpu_errchk(hipFree(gpu_raw_data));
gpu_errchk(hipFree(gpu_blur_v));
gpu_errchk(hipFree(gpu_out_data));
// Stop the recording timer and return the computation time
hipEventRecord(stop_gpu);
hipEventSynchronize(stop_gpu);
hipEventElapsedTime(&time_milli, start_gpu, stop_gpu);
return time_milli;
}
| e34092c60e6a37439ced48e83e81ff717315aca8.cu | /*
* CUDA blur
* Kevin Yuh, 2014
* Revised by Nailen Matschke, 2016
* Revised by Loko Kung, 2018
*/
#include "blur.cuh"
#include <cstdio>
#include <cuda_runtime.h>
#include "cuda_header.cuh"
/*
* NOTE: You can use this macro to easily check cuda error codes
* and get more information.
*
* Modified from:
* http://stackoverflow.com/questions/14038589/
* what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
*/
#define gpu_errchk(ans) { gpu_assert((ans), __FILE__, __LINE__); }
inline void gpu_assert(cudaError_t code, const char *file, int line,
bool abort = true) {
if (code != cudaSuccess) {
fprintf(stderr, "gpu_assert: %s %s %d\n",
cudaGetErrorString(code), file, line);
exit(code);
}
}
CUDA_CALLABLE
void cuda_blur_kernel_convolution(uint thread_index, const float* gpu_raw_data,
const float* gpu_blur_v, float* gpu_out_data,
const unsigned int n_frames,
const unsigned int blur_v_size) {
// TODO: Implement the necessary convolution function that should be
// completed for each thread_index. Use the CPU implementation in
// blur.cpp as a reference.
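// Sketch of the intended computation (a restatement of the code below, not a change):
//   out[n] = sum_{j=0}^{min(n, blur_v_size-1)} raw[n - j] * blur_v[j]
// i.e. a causal convolution whose window is clipped near the start of the signal.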
if (thread_index < blur_v_size) {
for (int j = 0; j <= thread_index; j++) {
gpu_out_data[thread_index] += gpu_raw_data[thread_index - j] * gpu_blur_v[j];
}
} else {
for (int j = 0; j < blur_v_size; j++) {
gpu_out_data[thread_index] += gpu_raw_data[thread_index - j] * gpu_blur_v[j];
}
}
}
__global__
void cuda_blur_kernel(const float *gpu_raw_data, const float *gpu_blur_v,
float *gpu_out_data, int n_frames, int blur_v_size) {
// TODO: Compute the current thread index.
uint thread_index = blockDim.x * blockIdx.x + threadIdx.x;
uint total_threads = gridDim.x * blockDim.x;
// TODO: Update the while loop to handle all indices for this thread.
// Remember to advance the index as necessary.
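// This is a grid-stride loop: with total_threads = gridDim.x * blockDim.x,
// each thread handles indices thread_index, thread_index + total_threads, ...
// so any (blocks, threads_per_block) configuration covers all n_frames samples.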
while (thread_index < n_frames) {
// Do computation for this thread index
cuda_blur_kernel_convolution(thread_index, gpu_raw_data,
gpu_blur_v, gpu_out_data,
n_frames, blur_v_size);
// TODO: Update the thread index
thread_index += total_threads;
}
}
float cuda_call_blur_kernel(const unsigned int blocks,
const unsigned int threads_per_block,
const float *raw_data,
const float *blur_v,
float *out_data,
const unsigned int n_frames,
const unsigned int blur_v_size) {
// Use the CUDA machinery for recording time
cudaEvent_t start_gpu, stop_gpu;
float time_milli = -1;
cudaEventCreate(&start_gpu);
cudaEventCreate(&stop_gpu);
cudaEventRecord(start_gpu);
// TODO: Allocate GPU memory for the raw input data (either audio file
// data or randomly generated data). The data is of type float and
// has n_frames elements. Then copy the data in raw_data into the
// GPU memory you allocated.
float* gpu_raw_data;
gpu_errchk(cudaMalloc(&gpu_raw_data, sizeof(float) * n_frames));
gpu_errchk(cudaMemcpy(gpu_raw_data, raw_data, sizeof(float) * n_frames, cudaMemcpyHostToDevice));
// TODO: Allocate GPU memory for the impulse signal (for now global GPU
// memory is fine). The data is of type float and has blur_v_size
// elements. Then copy the data in blur_v into the GPU memory you
// allocated.
float* gpu_blur_v;
gpu_errchk(cudaMalloc(&gpu_blur_v, sizeof(float) * blur_v_size));
gpu_errchk(cudaMemcpy(gpu_blur_v, blur_v, sizeof(float) * blur_v_size, cudaMemcpyHostToDevice));
// TODO: Allocate GPU memory to store the output audio signal after the
// convolution. The data is of type float and has n_frames elements.
// Initialize the data as necessary.
float* gpu_out_data;
gpu_errchk(cudaMalloc(&gpu_out_data, sizeof(float) * n_frames));
// The convolution kernel accumulates with +=, so zero the output buffer first.
gpu_errchk(cudaMemset(gpu_out_data, 0, sizeof(float) * n_frames));
// TODO: Appropriately call the kernel function.
cuda_blur_kernel<<<blocks, threads_per_block>>>(gpu_raw_data, gpu_blur_v, gpu_out_data, n_frames, blur_v_size);
// Check for errors on kernel call
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
fprintf(stderr, "Error %s\n", cudaGetErrorString(err));
else
fprintf(stderr, "No kernel error detected\n");
// TODO: Now that kernel calls have finished, copy the output signal
// back from the GPU to host memory. (We store this channel's result
// in out_data on the host.)
gpu_errchk(cudaMemcpy(out_data, gpu_out_data, sizeof(float) * n_frames, cudaMemcpyDeviceToHost));
// TODO: Now that we have finished our computations on the GPU, free the
// GPU resources.
gpu_errchk(cudaFree(gpu_raw_data));
gpu_errchk(cudaFree(gpu_blur_v));
gpu_errchk(cudaFree(gpu_out_data));
// Stop the recording timer and return the computation time
cudaEventRecord(stop_gpu);
cudaEventSynchronize(stop_gpu);
cudaEventElapsedTime(&time_milli, start_gpu, stop_gpu);
return time_milli;
}
|
67152fe4a8509da915a1790f8e2f5bdbea838462.hip | // !!! This is a file automatically generated by hipify!!!
/**
*
* Date 03/07/2009
* ====
*
* Authors Vincent Garcia
* ======= Eric Debreuve
* Michel Barlaud
*
* Description Given a reference point set and a query point set, the program returns
* =========== first the distance between each query point and its k nearest neighbors in
* the reference point set, and second the indexes of these k nearest neighbors.
* The computation is performed using the API NVIDIA CUDA.
*
* Paper Fast k nearest neighbor search using GPU
* =====
*
* BibTeX @INPROCEEDINGS{2008_garcia_cvgpu,
* ====== author = {V. Garcia and E. Debreuve and M. Barlaud},
* title = {Fast k nearest neighbor search using GPU},
* booktitle = {CVPR Workshop on Computer Vision on GPU},
* year = {2008},
* address = {Anchorage, Alaska, USA},
* month = {June}
* }
*
*/
// If the code is used in Matlab, set MATLAB_CODE to 1. Otherwise, set MATLAB_CODE to 0.
#define MATLAB_CODE 1
// Includes
#include <stdio.h>
#include <math.h>
#include "hip/hip_runtime.h"
#if MATLAB_CODE == 1
#include "mex.h"
#else
#include <time.h>
#endif
// Constants used by the program
#define MAX_PITCH_VALUE_IN_BYTES 262144
#define MAX_TEXTURE_WIDTH_IN_BYTES 65536
#define MAX_TEXTURE_HEIGHT_IN_BYTES 32768
#define MAX_PART_OF_FREE_MEMORY_USED 0.9
#define BLOCK_DIM 16
// Texture containing the reference points (if it is possible)
texture<float, 2, hipReadModeElementType> texA;
//-----------------------------------------------------------------------------------------------//
// KERNELS //
//-----------------------------------------------------------------------------------------------//
/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
* The matrix A is a texture.
*
* @param wA width of the matrix A = number of points in A
* @param B pointer on the matrix B
* @param wB width of the matrix B = number of points in B
* @param pB pitch of matrix B given in number of columns
* @param dim dimension of points = height of matrices A and B
* @param AB pointer on the matrix containing the wA*wB distances computed
*/
__global__ void cuComputeDistanceTexture(int wA, float * B, int wB, int pB, int dim, float* AB){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if ( xIndex<wB && yIndex<wA ){
float ssd = 0;
for (int i=0; i<dim; i++){
float tmp = tex2D(texA, (float)yIndex, (float)i) - B[ i * pB + xIndex ];
ssd += tmp * tmp;
}
AB[yIndex * pB + xIndex] = ssd;
}
}
/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
*
* @param A pointer on the matrix A
* @param wA width of the matrix A = number of points in A
* @param pA pitch of matrix A given in number of columns
* @param B pointer on the matrix B
* @param wB width of the matrix B = number of points in B
* @param pB pitch of matrix B given in number of columns
* @param dim dimension of points = height of matrices A and B
* @param AB pointer on the matrix containing the wA*wB distances computed
*/
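// Note: the kernel below writes *squared* Euclidean distances; cuParallelSqrt
// takes the square root of the k best rows only, after the insertion sort.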
__global__ void cuComputeDistanceGlobal( float* A, int wA, int pA, float* B, int wB, int pB, int dim, float* AB){
// Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step)
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Other variables
float tmp;
float ssd = 0;
// Loop parameters
begin_A = BLOCK_DIM * blockIdx.y;
begin_B = BLOCK_DIM * blockIdx.x;
step_A = BLOCK_DIM * pA;
step_B = BLOCK_DIM * pB;
end_A = begin_A + (dim-1) * pA;
// Conditions
int cond0 = (begin_A + tx < wA); // used to write in shared memory
int cond1 = (begin_B + tx < wB); // used to write in shared memory & to computations and to write in output matrix
int cond2 = (begin_A + ty < wA); // used to computations and to write in output matrix
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
if (a/pA + ty < dim){
shared_A[ty][tx] = (cond0)? A[a + pA * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? B[b + pB * ty + tx] : 0;
}
else{
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Compute the difference between the two matrixes; each thread computes one element of the block sub-matrix
if (cond2 && cond1){
for (int k = 0; k < BLOCK_DIM; ++k){
tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp*tmp;
}
}
// Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if (cond2 && cond1)
AB[ (begin_A + ty) * pB + begin_B + tx ] = ssd;
}
/**
* Gathers k-th smallest distances for each column of the distance matrix in the top.
*
* @param dist distance matrix
* @param dist_pitch pitch of the distance matrix given in number of columns
* @param ind index matrix
* @param ind_pitch pitch of the index matrix given in number of columns
* @param width width of the distance matrix and of the index matrix
* @param height height of the distance matrix and of the index matrix
* @param k number of neighbors to consider
*/
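// Note: the indices written to `ind` are 1-based (l+1), matching the Matlab
// convention used by the mex interface at the bottom of this file.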
__global__ void cuInsertionSort(float *dist, int dist_pitch, int *ind, int ind_pitch, int width, int height, int k){
// Variables
int l, i, j;
float *p_dist;
int *p_ind;
float curr_dist, max_dist;
int curr_row, max_row;
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex<width){
// Pointer shift, initialization, and max value
p_dist = dist + xIndex;
p_ind = ind + xIndex;
max_dist = p_dist[0];
p_ind[0] = 1;
// Part 1 : sort the k first elements
for (l=1; l<k; l++){
curr_row = l * dist_pitch;
curr_dist = p_dist[curr_row];
if (curr_dist<max_dist){
i=l-1;
for (int a=0; a<l-1; a++){
if (p_dist[a*dist_pitch]>curr_dist){
i=a;
break;
}
}
for (j=l; j>i; j--){
p_dist[j*dist_pitch] = p_dist[(j-1)*dist_pitch];
p_ind[j*ind_pitch] = p_ind[(j-1)*ind_pitch];
}
p_dist[i*dist_pitch] = curr_dist;
p_ind[i*ind_pitch] = l+1;
}
else
p_ind[l*ind_pitch] = l+1;
max_dist = p_dist[curr_row];
}
// Part 2 : insert element in the k-th first lines
max_row = (k-1)*dist_pitch;
for (l=k; l<height; l++){
curr_dist = p_dist[l*dist_pitch];
if (curr_dist<max_dist){
i=k-1;
for (int a=0; a<k-1; a++){
if (p_dist[a*dist_pitch]>curr_dist){
i=a;
break;
}
}
for (j=k-1; j>i; j--){
p_dist[j*dist_pitch] = p_dist[(j-1)*dist_pitch];
p_ind[j*ind_pitch] = p_ind[(j-1)*ind_pitch];
}
p_dist[i*dist_pitch] = curr_dist;
p_ind[i*ind_pitch] = l+1;
max_dist = p_dist[max_row];
}
}
}
}
/**
* Computes the square root of the k first lines (width elements each)
* of the distance matrix.
*
* @param dist distance matrix
* @param width width of the distance matrix
* @param pitch pitch of the distance matrix given in number of columns
* @param k number of neighbors to consider
*/
__global__ void cuParallelSqrt(float *dist, int width, int pitch, int k){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if (xIndex<width && yIndex<k)
dist[yIndex*pitch + xIndex] = sqrt(dist[yIndex*pitch + xIndex]);
}
//-----------------------------------------------------------------------------------------------//
// K-th NEAREST NEIGHBORS //
//-----------------------------------------------------------------------------------------------//
/**
* Prints the error message returned during the memory allocation.
*
* @param error error value returned by the memory allocation function
* @param memorySize size of memory tried to be allocated
*/
void printErrorMessage(hipError_t error, int memorySize){
printf("==================================================\n");
printf("MEMORY ALLOCATION ERROR : %s\n", hipGetErrorString(error));
printf("Whished allocated memory : %d\n", memorySize);
printf("==================================================\n");
#if MATLAB_CODE == 1
mexErrMsgTxt("CUDA ERROR DURING MEMORY ALLOCATION");
#endif
}
/**
* K nearest neighbor algorithm
* - Initialize CUDA
* - Allocate device memory
* - Copy point sets (reference and query points) from host to device memory
* - Compute the distances + indexes to the k nearest neighbors for each query point
* - Copy distances from device to host memory
*
* @param ref_host reference points ; pointer to linear matrix
* @param ref_width number of reference points ; width of the matrix
* @param query_host query points ; pointer to linear matrix
* @param query_width number of query points ; width of the matrix
* @param height dimension of points ; height of the matrices
* @param k number of neighbors to consider
* @param dist_host distances to k nearest neighbors ; pointer to linear matrix
* @param ind_host indexes of the k nearest neighbors ; pointer to linear matrix
*
*/
void knn(float* ref_host, int ref_width, float* query_host, int query_width, int height, int k, float* dist_host, int* ind_host){
unsigned int size_of_float = sizeof(float);
unsigned int size_of_int = sizeof(int);
// Variables
float *query_dev;
float *ref_dev;
float *dist_dev;
int *ind_dev;
hipArray *ref_array;
hipError_t result;
size_t query_pitch;
size_t query_pitch_in_bytes;
size_t ref_pitch;
size_t ref_pitch_in_bytes;
size_t ind_pitch;
size_t ind_pitch_in_bytes;
size_t max_nb_query_traited;
size_t actual_nb_query_width;
size_t memory_total;
size_t memory_free;
// Check if we can use texture memory for reference points
unsigned int use_texture = ( ref_width*size_of_float<=MAX_TEXTURE_WIDTH_IN_BYTES && height*size_of_float<=MAX_TEXTURE_HEIGHT_IN_BYTES );
// CUDA Initialisation
hipInit(0);
// Check free memory using driver API ; only (MAX_PART_OF_FREE_MEMORY_USED*100)% of memory will be used
hipCtx_t cuContext;
hipDevice_t cuDevice=0;
hipCtxCreate(&cuContext, 0, cuDevice);
hipMemGetInfo(&memory_free, &memory_total); // hipify leaves cuMemGetInfo untranslated; the HIP runtime call takes size_t*
hipCtxDetach (cuContext);
// Determine maximum number of query that can be treated
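// Rough budget used below: each treated query point needs `height` floats for its
// query column, `ref_width` floats for its distance row and `k` ints for the
// neighbor indices, after ref_width*height floats are reserved for the reference set.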
max_nb_query_traited = ( memory_free * MAX_PART_OF_FREE_MEMORY_USED - size_of_float * ref_width*height ) / ( size_of_float * (height + ref_width) + size_of_int * k);
max_nb_query_traited = min( query_width, (max_nb_query_traited / 16) * 16 );
// Allocation of global memory for query points and for distances
result = hipMallocPitch( (void **) &query_dev, &query_pitch_in_bytes, max_nb_query_traited * size_of_float, height + ref_width);
if (result){
printErrorMessage(result, max_nb_query_traited*size_of_float*(height+ref_width));
return;
}
query_pitch = query_pitch_in_bytes/size_of_float;
dist_dev = query_dev + height * query_pitch;
// Allocation of global memory for indexes
result = hipMallocPitch( (void **) &ind_dev, &ind_pitch_in_bytes, max_nb_query_traited * size_of_int, k);
if (result){
hipFree(query_dev);
printErrorMessage(result, max_nb_query_traited*size_of_int*k);
return;
}
ind_pitch = ind_pitch_in_bytes/size_of_int;
// Allocation of memory (global or texture) for reference points
if (use_texture){
// Allocation of texture memory
hipChannelFormatDesc channelDescA = hipCreateChannelDesc<float>();
result = hipMallocArray( &ref_array, &channelDescA, ref_width, height );
if (result){
printErrorMessage(result, ref_width*height*size_of_float);
hipFree(ind_dev);
hipFree(query_dev);
return;
}
hipMemcpyToArray( ref_array, 0, 0, ref_host, ref_width * height * size_of_float, hipMemcpyHostToDevice );
// Set texture parameters and bind texture to array
texA.addressMode[0] = hipAddressModeClamp;
texA.addressMode[1] = hipAddressModeClamp;
texA.filterMode = hipFilterModePoint;
texA.normalized = 0;
hipBindTextureToArray(texA, ref_array);
}
else{
// Allocation of global memory
result = hipMallocPitch( (void **) &ref_dev, &ref_pitch_in_bytes, ref_width * size_of_float, height);
if (result){
printErrorMessage(result, ref_width*size_of_float*height);
hipFree(ind_dev);
hipFree(query_dev);
return;
}
ref_pitch = ref_pitch_in_bytes/size_of_float;
hipMemcpy2D(ref_dev, ref_pitch_in_bytes, ref_host, ref_width*size_of_float, ref_width*size_of_float, height, hipMemcpyHostToDevice);
}
// Split queries to fit in GPU memory
for (int i=0; i<query_width; i+=max_nb_query_traited){
// Number of query points considered
actual_nb_query_width = min( max_nb_query_traited, query_width-i );
// Copy of part of query actually being treated
hipMemcpy2D(query_dev, query_pitch_in_bytes, &query_host[i], query_width*size_of_float, actual_nb_query_width*size_of_float, height, hipMemcpyHostToDevice);
// Grids and threads
dim3 g_16x16(actual_nb_query_width/16, ref_width/16, 1);
dim3 t_16x16(16, 16, 1);
if (actual_nb_query_width%16 != 0) g_16x16.x += 1;
if (ref_width %16 != 0) g_16x16.y += 1;
//
dim3 g_256x1(actual_nb_query_width/256, 1, 1);
dim3 t_256x1(256, 1, 1);
if (actual_nb_query_width%256 != 0) g_256x1.x += 1;
//
dim3 g_k_16x16(actual_nb_query_width/16, k/16, 1);
dim3 t_k_16x16(16, 16, 1);
if (actual_nb_query_width%16 != 0) g_k_16x16.x += 1;
if (k %16 != 0) g_k_16x16.y += 1;
// Kernel 1: Compute all the distances
if (use_texture)
hipLaunchKernelGGL(( cuComputeDistanceTexture), dim3(g_16x16),dim3(t_16x16), 0, 0, ref_width, query_dev, actual_nb_query_width, query_pitch, height, dist_dev);
else
hipLaunchKernelGGL(( cuComputeDistanceGlobal), dim3(g_16x16),dim3(t_16x16), 0, 0, ref_dev, ref_width, ref_pitch, query_dev, actual_nb_query_width, query_pitch, height, dist_dev);
// Kernel 2: Sort each column
hipLaunchKernelGGL(( cuInsertionSort), dim3(g_256x1),dim3(t_256x1), 0, 0, dist_dev, query_pitch, ind_dev, ind_pitch, actual_nb_query_width, ref_width, k);
// Kernel 3: Compute square root of k first elements
hipLaunchKernelGGL(( cuParallelSqrt), dim3(g_k_16x16),dim3(t_k_16x16), 0, 0, dist_dev, query_width, query_pitch, k);
// Memory copy of output from device to host
hipMemcpy2D(&dist_host[i], query_width*size_of_float, dist_dev, query_pitch_in_bytes, actual_nb_query_width*size_of_float, k, hipMemcpyDeviceToHost);
hipMemcpy2D(&ind_host[i], query_width*size_of_int, ind_dev, ind_pitch_in_bytes, actual_nb_query_width*size_of_int, k, hipMemcpyDeviceToHost);
}
// Free memory
if (use_texture)
hipFreeArray(ref_array);
else
hipFree(ref_dev);
hipFree(ind_dev);
hipFree(query_dev);
}
//-----------------------------------------------------------------------------------------------//
// MATLAB INTERFACE & C EXAMPLE //
//-----------------------------------------------------------------------------------------------//
#if MATLAB_CODE == 1
/**
* Interface to use CUDA code in Matlab (gateway routine).
*
* @param nlhs Number of expected mxArrays (Left Hand Side)
* @param plhs Array of pointers to expected outputs
* @param nrhs Number of inputs (Right Hand Side)
* @param prhs Array of pointers to input data. The input data is read-only and should not be altered by your mexFunction .
*/
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
// Variables
float* ref;
int ref_width;
int ref_height;
float* query;
int query_width;
int query_height;
float* dist;
int* ind;
int k;
// Reference points
ref = (float *) mxGetData(prhs[0]);
ref_width = mxGetM(prhs[0]);
ref_height = mxGetN(prhs[0]);
// Query points
query = (float *) mxGetData(prhs[1]);
query_width = mxGetM(prhs[1]);
query_height = mxGetN(prhs[1]);
// Number of neighbors to consider
k = (int)mxGetScalar(prhs[2]);
// Verification of the reference point and query point sizes
if (ref_height!=query_height)
mexErrMsgTxt("Data must have the same dimension");
if (ref_width*sizeof(float)>MAX_PITCH_VALUE_IN_BYTES)
mexErrMsgTxt("Reference number is too large for CUDA (Max=65536)");
if (query_width*sizeof(float)>MAX_PITCH_VALUE_IN_BYTES)
mexErrMsgTxt("Query number is too large for CUDA (Max=65536)");
// Allocation of output arrays
dist = (float *) mxGetPr(plhs[0] = mxCreateNumericMatrix(query_width, k, mxSINGLE_CLASS, mxREAL));
ind = (int *) mxGetPr(plhs[1] = mxCreateNumericMatrix(query_width, k, mxINT32_CLASS, mxREAL));
// Call KNN CUDA
knn(ref, ref_width, query, query_width, ref_height, k, dist, ind);
}
#else // C code
/**
* Example of use of kNN search CUDA.
*/
int main(void){
// Variables and parameters
float* ref; // Pointer to reference point array
float* query; // Pointer to query point array
float* dist; // Pointer to distance array
int* ind; // Pointer to index array
int ref_nb = 4096; // Reference point number, max=65535
int query_nb = 4096; // Query point number, max=65535
int dim = 32; // Dimension of points
int k = 20; // Nearest neighbors to consider
int iterations = 100;
int i;
// Memory allocation
ref = (float *) malloc(ref_nb * dim * sizeof(float));
query = (float *) malloc(query_nb * dim * sizeof(float));
dist = (float *) malloc(query_nb * k * sizeof(float));
ind = (int *) malloc(query_nb * k * sizeof(int));
// Init
srand(time(NULL));
for (i=0 ; i<ref_nb * dim ; i++) ref[i] = (float)rand() / (float)RAND_MAX;
for (i=0 ; i<query_nb * dim ; i++) query[i] = (float)rand() / (float)RAND_MAX;
// Variables for duration evaluation
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float elapsed_time;
// Display informations
printf("Number of reference points : %6d\n", ref_nb );
printf("Number of query points : %6d\n", query_nb);
printf("Dimension of points : %4d\n", dim );
printf("Number of neighbors to consider : %4d\n", k );
printf("Processing kNN search :" );
// Call kNN search CUDA
hipEventRecord(start, 0);
for (i=0; i<iterations; i++)
knn(ref, ref_nb, query, query_nb, dim, k, dist, ind);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time, start, stop);
printf(" done in %f s for %d iterations (%f s by iteration)\n", elapsed_time/1000, iterations, elapsed_time/(iterations*1000));
// Destroy cuda event object and free memory
hipEventDestroy(start);
hipEventDestroy(stop);
free(ind);
free(dist);
free(query);
free(ref);
}
#endif
| 67152fe4a8509da915a1790f8e2f5bdbea838462.cu | /**
*
* Date 03/07/2009
* ====
*
* Authors Vincent Garcia
* ======= Eric Debreuve
* Michel Barlaud
*
* Description Given a reference point set and a query point set, the program returns
* =========== first the distance between each query point and its k nearest neighbors in
* the reference point set, and second the indexes of these k nearest neighbors.
* The computation is performed using the API NVIDIA CUDA.
*
* Paper Fast k nearest neighbor search using GPU
* =====
*
* BibTeX @INPROCEEDINGS{2008_garcia_cvgpu,
* ====== author = {V. Garcia and E. Debreuve and M. Barlaud},
* title = {Fast k nearest neighbor search using GPU},
* booktitle = {CVPR Workshop on Computer Vision on GPU},
* year = {2008},
* address = {Anchorage, Alaska, USA},
* month = {June}
* }
*
*/
// If the code is used in Matlab, set MATLAB_CODE to 1. Otherwise, set MATLAB_CODE to 0.
#define MATLAB_CODE 1
// Includes
#include <stdio.h>
#include <math.h>
#include "cuda.h"
#if MATLAB_CODE == 1
#include "mex.h"
#else
#include <time.h>
#endif
// Constants used by the program
#define MAX_PITCH_VALUE_IN_BYTES 262144
#define MAX_TEXTURE_WIDTH_IN_BYTES 65536
#define MAX_TEXTURE_HEIGHT_IN_BYTES 32768
#define MAX_PART_OF_FREE_MEMORY_USED 0.9
#define BLOCK_DIM 16
// Texture containing the reference points (if it is possible)
texture<float, 2, cudaReadModeElementType> texA;
//-----------------------------------------------------------------------------------------------//
// KERNELS //
//-----------------------------------------------------------------------------------------------//
/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
* The matrix A is a texture.
*
* @param wA width of the matrix A = number of points in A
* @param B pointer on the matrix B
* @param wB width of the matrix B = number of points in B
* @param pB pitch of matrix B given in number of columns
* @param dim dimension of points = height of matrices A and B
* @param AB pointer on the matrix containing the wA*wB distances computed
*/
__global__ void cuComputeDistanceTexture(int wA, float * B, int wB, int pB, int dim, float* AB){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if ( xIndex<wB && yIndex<wA ){
float ssd = 0;
for (int i=0; i<dim; i++){
float tmp = tex2D(texA, (float)yIndex, (float)i) - B[ i * pB + xIndex ];
ssd += tmp * tmp;
}
AB[yIndex * pB + xIndex] = ssd;
}
}
/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
*
* @param A pointer on the matrix A
* @param wA width of the matrix A = number of points in A
* @param pA pitch of matrix A given in number of columns
* @param B pointer on the matrix B
* @param wB width of the matrix B = number of points in B
* @param pB pitch of matrix B given in number of columns
* @param dim dimension of points = height of matrices A and B
* @param AB pointer on the matrix containing the wA*wB distances computed
*/
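// Note: the kernel below writes *squared* Euclidean distances; cuParallelSqrt
// takes the square root of the k best rows only, after the insertion sort.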
__global__ void cuComputeDistanceGlobal( float* A, int wA, int pA, float* B, int wB, int pB, int dim, float* AB){
// Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step)
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Other variables
float tmp;
float ssd = 0;
// Loop parameters
begin_A = BLOCK_DIM * blockIdx.y;
begin_B = BLOCK_DIM * blockIdx.x;
step_A = BLOCK_DIM * pA;
step_B = BLOCK_DIM * pB;
end_A = begin_A + (dim-1) * pA;
// Conditions
int cond0 = (begin_A + tx < wA); // used to write in shared memory
int cond1 = (begin_B + tx < wB); // used to write in shared memory & to computations and to write in output matrix
int cond2 = (begin_A + ty < wA); // used to computations and to write in output matrix
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
if (a/pA + ty < dim){
shared_A[ty][tx] = (cond0)? A[a + pA * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? B[b + pB * ty + tx] : 0;
}
else{
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Compute the difference between the two matrixes; each thread computes one element of the block sub-matrix
if (cond2 && cond1){
for (int k = 0; k < BLOCK_DIM; ++k){
tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp*tmp;
}
}
// Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if (cond2 && cond1)
AB[ (begin_A + ty) * pB + begin_B + tx ] = ssd;
}
/**
* Gathers k-th smallest distances for each column of the distance matrix in the top.
*
* @param dist distance matrix
* @param dist_pitch pitch of the distance matrix given in number of columns
* @param ind index matrix
* @param ind_pitch pitch of the index matrix given in number of columns
* @param width width of the distance matrix and of the index matrix
* @param height height of the distance matrix and of the index matrix
* @param k number of neighbors to consider
*/
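// Note: the indices written to `ind` are 1-based (l+1), matching the Matlab
// convention used by the mex interface at the bottom of this file.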
__global__ void cuInsertionSort(float *dist, int dist_pitch, int *ind, int ind_pitch, int width, int height, int k){
// Variables
int l, i, j;
float *p_dist;
int *p_ind;
float curr_dist, max_dist;
int curr_row, max_row;
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex<width){
// Pointer shift, initialization, and max value
p_dist = dist + xIndex;
p_ind = ind + xIndex;
max_dist = p_dist[0];
p_ind[0] = 1;
// Part 1 : sort the k first elements
for (l=1; l<k; l++){
curr_row = l * dist_pitch;
curr_dist = p_dist[curr_row];
if (curr_dist<max_dist){
i=l-1;
for (int a=0; a<l-1; a++){
if (p_dist[a*dist_pitch]>curr_dist){
i=a;
break;
}
}
for (j=l; j>i; j--){
p_dist[j*dist_pitch] = p_dist[(j-1)*dist_pitch];
p_ind[j*ind_pitch] = p_ind[(j-1)*ind_pitch];
}
p_dist[i*dist_pitch] = curr_dist;
p_ind[i*ind_pitch] = l+1;
}
else
p_ind[l*ind_pitch] = l+1;
max_dist = p_dist[curr_row];
}
// Part 2 : insert element in the k-th first lines
max_row = (k-1)*dist_pitch;
for (l=k; l<height; l++){
curr_dist = p_dist[l*dist_pitch];
if (curr_dist<max_dist){
i=k-1;
for (int a=0; a<k-1; a++){
if (p_dist[a*dist_pitch]>curr_dist){
i=a;
break;
}
}
for (j=k-1; j>i; j--){
p_dist[j*dist_pitch] = p_dist[(j-1)*dist_pitch];
p_ind[j*ind_pitch] = p_ind[(j-1)*ind_pitch];
}
p_dist[i*dist_pitch] = curr_dist;
p_ind[i*ind_pitch] = l+1;
max_dist = p_dist[max_row];
}
}
}
}
/**
* Computes the square root of the k first lines (width elements each)
* of the distance matrix.
*
* @param dist distance matrix
* @param width width of the distance matrix
* @param pitch pitch of the distance matrix given in number of columns
* @param k number of neighbors to consider
*/
__global__ void cuParallelSqrt(float *dist, int width, int pitch, int k){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if (xIndex<width && yIndex<k)
dist[yIndex*pitch + xIndex] = sqrt(dist[yIndex*pitch + xIndex]);
}
//-----------------------------------------------------------------------------------------------//
// K-th NEAREST NEIGHBORS //
//-----------------------------------------------------------------------------------------------//
/**
* Prints the error message returned during the memory allocation.
*
* @param error error value returned by the memory allocation function
* @param memorySize size of memory tried to be allocated
*/
void printErrorMessage(cudaError_t error, int memorySize){
printf("==================================================\n");
printf("MEMORY ALLOCATION ERROR : %s\n", cudaGetErrorString(error));
printf("Whished allocated memory : %d\n", memorySize);
printf("==================================================\n");
#if MATLAB_CODE == 1
mexErrMsgTxt("CUDA ERROR DURING MEMORY ALLOCATION");
#endif
}
/**
* K nearest neighbor algorithm
* - Initialize CUDA
* - Allocate device memory
* - Copy point sets (reference and query points) from host to device memory
* - Compute the distances + indexes to the k nearest neighbors for each query point
* - Copy distances from device to host memory
*
* @param ref_host reference points ; pointer to linear matrix
* @param ref_width number of reference points ; width of the matrix
* @param query_host query points ; pointer to linear matrix
* @param query_width number of query points ; width of the matrix
* @param height dimension of points ; height of the matrices
* @param k number of neighbors to consider
* @param dist_host distances to k nearest neighbors ; pointer to linear matrix
* @param ind_host indexes of the k nearest neighbors ; pointer to linear matrix
*
*/
void knn(float* ref_host, int ref_width, float* query_host, int query_width, int height, int k, float* dist_host, int* ind_host){
unsigned int size_of_float = sizeof(float);
unsigned int size_of_int = sizeof(int);
// Variables
float *query_dev;
float *ref_dev;
float *dist_dev;
int *ind_dev;
cudaArray *ref_array;
cudaError_t result;
size_t query_pitch;
size_t query_pitch_in_bytes;
size_t ref_pitch;
size_t ref_pitch_in_bytes;
size_t ind_pitch;
size_t ind_pitch_in_bytes;
size_t max_nb_query_traited;
size_t actual_nb_query_width;
size_t memory_total; // recent CUDA toolkits declare cuMemGetInfo with size_t* parameters
size_t memory_free;
// Check if we can use texture memory for reference points
unsigned int use_texture = ( ref_width*size_of_float<=MAX_TEXTURE_WIDTH_IN_BYTES && height*size_of_float<=MAX_TEXTURE_HEIGHT_IN_BYTES );
// CUDA Initialisation
cuInit(0);
// Check free memory using driver API ; only (MAX_PART_OF_FREE_MEMORY_USED*100)% of memory will be used
CUcontext cuContext;
CUdevice cuDevice=0;
cuCtxCreate(&cuContext, 0, cuDevice);
cuMemGetInfo(&memory_free, &memory_total);
cuCtxDetach (cuContext);
// Determine maximum number of query that can be treated
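// Rough budget used below: each treated query point needs `height` floats for its
// query column, `ref_width` floats for its distance row and `k` ints for the
// neighbor indices, after ref_width*height floats are reserved for the reference set.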
max_nb_query_traited = ( memory_free * MAX_PART_OF_FREE_MEMORY_USED - size_of_float * ref_width*height ) / ( size_of_float * (height + ref_width) + size_of_int * k);
max_nb_query_traited = min( query_width, (max_nb_query_traited / 16) * 16 );
// Allocation of global memory for query points and for distances
result = cudaMallocPitch( (void **) &query_dev, &query_pitch_in_bytes, max_nb_query_traited * size_of_float, height + ref_width);
if (result){
printErrorMessage(result, max_nb_query_traited*size_of_float*(height+ref_width));
return;
}
query_pitch = query_pitch_in_bytes/size_of_float;
dist_dev = query_dev + height * query_pitch;
// Allocation of global memory for indexes
result = cudaMallocPitch( (void **) &ind_dev, &ind_pitch_in_bytes, max_nb_query_traited * size_of_int, k);
if (result){
cudaFree(query_dev);
printErrorMessage(result, max_nb_query_traited*size_of_int*k);
return;
}
ind_pitch = ind_pitch_in_bytes/size_of_int;
// Allocation of memory (global or texture) for reference points
if (use_texture){
// Allocation of texture memory
cudaChannelFormatDesc channelDescA = cudaCreateChannelDesc<float>();
result = cudaMallocArray( &ref_array, &channelDescA, ref_width, height );
if (result){
printErrorMessage(result, ref_width*height*size_of_float);
cudaFree(ind_dev);
cudaFree(query_dev);
return;
}
cudaMemcpyToArray( ref_array, 0, 0, ref_host, ref_width * height * size_of_float, cudaMemcpyHostToDevice );
// Set texture parameters and bind texture to array
texA.addressMode[0] = cudaAddressModeClamp;
texA.addressMode[1] = cudaAddressModeClamp;
texA.filterMode = cudaFilterModePoint;
texA.normalized = 0;
cudaBindTextureToArray(texA, ref_array);
}
else{
// Allocation of global memory
result = cudaMallocPitch( (void **) &ref_dev, &ref_pitch_in_bytes, ref_width * size_of_float, height);
if (result){
printErrorMessage(result, ref_width*size_of_float*height);
cudaFree(ind_dev);
cudaFree(query_dev);
return;
}
ref_pitch = ref_pitch_in_bytes/size_of_float;
cudaMemcpy2D(ref_dev, ref_pitch_in_bytes, ref_host, ref_width*size_of_float, ref_width*size_of_float, height, cudaMemcpyHostToDevice);
}
// Split queries to fit in GPU memory
for (int i=0; i<query_width; i+=max_nb_query_traited){
// Number of query points considered
actual_nb_query_width = min( max_nb_query_traited, query_width-i );
// Copy of part of query actually being treated
cudaMemcpy2D(query_dev, query_pitch_in_bytes, &query_host[i], query_width*size_of_float, actual_nb_query_width*size_of_float, height, cudaMemcpyHostToDevice);
// Grids and threads
dim3 g_16x16(actual_nb_query_width/16, ref_width/16, 1);
dim3 t_16x16(16, 16, 1);
if (actual_nb_query_width%16 != 0) g_16x16.x += 1;
if (ref_width %16 != 0) g_16x16.y += 1;
//
dim3 g_256x1(actual_nb_query_width/256, 1, 1);
dim3 t_256x1(256, 1, 1);
if (actual_nb_query_width%256 != 0) g_256x1.x += 1;
//
dim3 g_k_16x16(actual_nb_query_width/16, k/16, 1);
dim3 t_k_16x16(16, 16, 1);
if (actual_nb_query_width%16 != 0) g_k_16x16.x += 1;
if (k %16 != 0) g_k_16x16.y += 1;
// Kernel 1: Compute all the distances
if (use_texture)
cuComputeDistanceTexture<<<g_16x16,t_16x16>>>(ref_width, query_dev, actual_nb_query_width, query_pitch, height, dist_dev);
else
cuComputeDistanceGlobal<<<g_16x16,t_16x16>>>(ref_dev, ref_width, ref_pitch, query_dev, actual_nb_query_width, query_pitch, height, dist_dev);
// Kernel 2: Sort each column
cuInsertionSort<<<g_256x1,t_256x1>>>(dist_dev, query_pitch, ind_dev, ind_pitch, actual_nb_query_width, ref_width, k);
// Kernel 3: Compute square root of k first elements
cuParallelSqrt<<<g_k_16x16,t_k_16x16>>>(dist_dev, query_width, query_pitch, k);
// Memory copy of output from device to host
cudaMemcpy2D(&dist_host[i], query_width*size_of_float, dist_dev, query_pitch_in_bytes, actual_nb_query_width*size_of_float, k, cudaMemcpyDeviceToHost);
cudaMemcpy2D(&ind_host[i], query_width*size_of_int, ind_dev, ind_pitch_in_bytes, actual_nb_query_width*size_of_int, k, cudaMemcpyDeviceToHost);
}
// Free memory
if (use_texture)
cudaFreeArray(ref_array);
else
cudaFree(ref_dev);
cudaFree(ind_dev);
cudaFree(query_dev);
}
//-----------------------------------------------------------------------------------------------//
// MATLAB INTERFACE & C EXAMPLE //
//-----------------------------------------------------------------------------------------------//
#if MATLAB_CODE == 1
/**
* Interface to use CUDA code in Matlab (gateway routine).
*
* @param nlhs Number of expected mxArrays (Left Hand Side)
* @param plhs Array of pointers to expected outputs
* @param nrhs Number of inputs (Right Hand Side)
* @param prhs Array of pointers to input data. The input data is read-only and should not be altered by your mexFunction .
*/
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
// Variables
float* ref;
int ref_width;
int ref_height;
float* query;
int query_width;
int query_height;
float* dist;
int* ind;
int k;
// Reference points
ref = (float *) mxGetData(prhs[0]);
ref_width = mxGetM(prhs[0]);
ref_height = mxGetN(prhs[0]);
// Query points
query = (float *) mxGetData(prhs[1]);
query_width = mxGetM(prhs[1]);
query_height = mxGetN(prhs[1]);
// Number of neighbors to consider
k = (int)mxGetScalar(prhs[2]);
// Verification of the reference point and query point sizes
if (ref_height!=query_height)
mexErrMsgTxt("Data must have the same dimension");
if (ref_width*sizeof(float)>MAX_PITCH_VALUE_IN_BYTES)
mexErrMsgTxt("Reference number is too large for CUDA (Max=65536)");
if (query_width*sizeof(float)>MAX_PITCH_VALUE_IN_BYTES)
mexErrMsgTxt("Query number is too large for CUDA (Max=65536)");
// Allocation of output arrays
dist = (float *) mxGetPr(plhs[0] = mxCreateNumericMatrix(query_width, k, mxSINGLE_CLASS, mxREAL));
ind = (int *) mxGetPr(plhs[1] = mxCreateNumericMatrix(query_width, k, mxINT32_CLASS, mxREAL));
// Call KNN CUDA
knn(ref, ref_width, query, query_width, ref_height, k, dist, ind);
}
#else // C code
/**
* Example of use of kNN search CUDA.
*/
int main(void){
// Variables and parameters
float* ref; // Pointer to reference point array
float* query; // Pointer to query point array
float* dist; // Pointer to distance array
int* ind; // Pointer to index array
int ref_nb = 4096; // Reference point number, max=65535
int query_nb = 4096; // Query point number, max=65535
int dim = 32; // Dimension of points
int k = 20; // Nearest neighbors to consider
int iterations = 100;
int i;
// Memory allocation
ref = (float *) malloc(ref_nb * dim * sizeof(float));
query = (float *) malloc(query_nb * dim * sizeof(float));
dist = (float *) malloc(query_nb * k * sizeof(float));
ind = (int *) malloc(query_nb * k * sizeof(int));
// Init
srand(time(NULL));
for (i=0 ; i<ref_nb * dim ; i++) ref[i] = (float)rand() / (float)RAND_MAX;
for (i=0 ; i<query_nb * dim ; i++) query[i] = (float)rand() / (float)RAND_MAX;
// Variables for duration evaluation
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float elapsed_time;
// Display informations
printf("Number of reference points : %6d\n", ref_nb );
printf("Number of query points : %6d\n", query_nb);
printf("Dimension of points : %4d\n", dim );
printf("Number of neighbors to consider : %4d\n", k );
printf("Processing kNN search :" );
// Call kNN search CUDA
cudaEventRecord(start, 0);
for (i=0; i<iterations; i++)
knn(ref, ref_nb, query, query_nb, dim, k, dist, ind);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
printf(" done in %f s for %d iterations (%f s by iteration)\n", elapsed_time/1000, iterations, elapsed_time/(iterations*1000));
// Destroy cuda event object and free memory
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(ind);
free(dist);
free(query);
free(ref);
}
#endif
|
3a2badbd9dc682c91347d394741bad0c9e43c45e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdlib.h>
//#include <wb.h>
#include <stdint.h>
#include <ctype.h>
#include <stdio.h>
#include <unistd.h>
#include <iostream>
#include "../../hamc/hamc_cpu_code.c"
#include "../../hamc/MultiplyMatrixTesting.cu"
#define TILE_WIDTH 16
#define CUDA_CHECK(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess){
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
void printHelp()
{
printf("run this executable with the following flags\n");
printf("\n");
printf("\t-a <input0 file name>\n");
printf("\t-b <input1 file name>\n");
printf("\t-e <expected solution file name>\n");
printf("\t-o <output file name>\n");
printf("\t-c \n");
}
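// Example invocation (hypothetical binary name), comparing the two GPU kernel
// versions on random 1024x1024 inputs:
//   ./matrix_mult_test -g -x 1024 -y 1024 -z 1024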
bin_matrix run_cpu(bin_matrix A, bin_matrix B)
{
return matrix_mult_cpu(A, B);
}
void run_time(int x, int y)
{
clock_t start, end;
double time_used;
bool matched = true;
printf("Matrix dimension: %dX%d\n", x, y);
HAMC_DATA_TYPE_t *dataA = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
HAMC_DATA_TYPE_t *dataB = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
for(int i = 0; i < x * y; i++){
dataA[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
dataB[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
}
bin_matrix A = mat_init_cpu(x, y);
bin_matrix B = mat_init_cpu(y, x);
A->data = dataA;
B->data = dataB;
start = clock();
bin_matrix C = run_cpu(A, B);
end = clock();
time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
std::cout << "CPU time: " << time_used << std::endl;
start = clock();
bin_matrix G = run_mult_kernel_test(A, B);
end = clock();
time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
std::cout << "GPU time: " << time_used << std::endl;
for(int i = 0; i < C->rows * C->cols; i++){
if((C->rows != G->rows) || (C->cols != G->cols)){
if(C->rows != G->rows){
printf("Row size doesn't match.\n");
}
if(C->cols != G->cols){
printf("Col size doesn't match.\n");
}
matched = false;
break;
}
if(C->data[i] != G->data[i]){
printf("Index failed at: %d\n", i);
matched = false;
break;
}
}
printf("Matched: %s", matched ? "true" : "false");
free(C);
free(G);
}
void run_profile_og(int x, int y)
{
printf("Matrix size: %dX%d\n", x, y);
HAMC_DATA_TYPE_t *dataA = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
HAMC_DATA_TYPE_t *dataB = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
for(int i = 0; i < x * y; i++){
dataA[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
dataB[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
}
bin_matrix A = mat_init_cpu(x, y);
bin_matrix B = mat_init_cpu(y, x);
A->data = dataA;
B->data = dataB;
bin_matrix C = run_mult_kernel(A, B);
free(C);
}
void run_profile_op(int x, int y)
{
printf("Matrix size: %dX%d\n", x, y);
HAMC_DATA_TYPE_t *dataA = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
HAMC_DATA_TYPE_t *dataB = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
for(int i = 0; i < x * y; i++){
dataA[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
dataB[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
}
bin_matrix A = mat_init_cpu(x, y);
bin_matrix B = mat_init_cpu(y, x);
A->data = dataA;
B->data = dataB;
bin_matrix C = run_mult_kernel_test(A, B);
free(C);
}
void run_gpu_vers(int x, int y, int z)
{
clock_t start, end;
double time_used;
bool matched = true;
printf("Matrix dimensions: %dX%d, %dX%d\n", x, y, y, z);
HAMC_DATA_TYPE_t *dataA = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
HAMC_DATA_TYPE_t *dataB = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * y * z); // B is y x z: allocate y*z elements (x*y under-allocates when z > x)
for(int i = 0; i < x * y; i++){
dataA[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
}
for(int i = 0; i < z * y; i++){
dataB[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
}
bin_matrix A = mat_init_cpu(x, y);
bin_matrix B = mat_init_cpu(y, z);
A->data = dataA;
B->data = dataB;
start = clock();
bin_matrix G1 = run_mult_kernel(A, B);
end = clock();
time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
std::cout << "GPU V1 time: " << time_used << std::endl;
/*for(int i = 0; i < G1->rows; i++){
for(int j = 0; j < G1->cols; j++){
printf("%d:", G1->data[i * G1->cols + j]);
}
printf("\n");
}/**/
start = clock();
bin_matrix G2 = run_mult_kernel_test(A, B);
end = clock();
time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
std::cout << "GPU V2 time: " << time_used << std::endl;
for(int i = 0; i < G1->rows * G1->cols; i++){
if((G1->rows != G2->rows) || (G1->cols != G2->cols)){
if(G1->rows != G2->rows){
printf("Row size doesn't match.\n");
}
if(G1->cols != G2->cols){
printf("Col size doesn't match.\n");
}
matched = false;
break;
}
if(G1->data[i] != G2->data[i]){
printf("Index failed at: %d\n", i);
matched = false;
break;
}
}
for(int i = 0; i < G1->rows; i++){
for(int j = 0; j < G1->cols; j++){
printf("%d:", G1->data[i * G1->cols + j]);
printf("%d, ", G2->data[i * G1->cols + j]);
}
printf("\n");
}/**/
printf("Matched: %s", matched ? "true" : "false");
free(G1);
free(G2);
}
void run_tile_sweep(int x, int y, int upto)
{
clock_t start, end;
double time_used;
HAMC_DATA_TYPE_t *dataA = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
HAMC_DATA_TYPE_t *dataB = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
for(int i = 0; i < x * y; i++){
dataA[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
dataB[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
}
bin_matrix A = mat_init_cpu(x, y);
bin_matrix B = mat_init_cpu(y, x);
bin_matrix C = mat_init_cpu(x, y);
A->data = dataA;
B->data = dataB;
for(int i = 4; i <= upto; i *= 2){
start = clock();
//C = run_mult_kernel(A, B, i);
C = run_mult_kernel_test(A, B);
end = clock();
time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
std::cout << "GPU time: " << time_used << std::endl;
}
}
void run_size_sweep()
{
}
void run_debug(int x, int y)
{
clock_t start, end;
double time_used;
bool matched = true;
printf("Matrix dimensions: %dX%d\n", x, y);
HAMC_DATA_TYPE_t *dataA = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
HAMC_DATA_TYPE_t *dataB = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
for(int i = 0; i < x * y; i++){
dataA[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
dataB[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
}
bin_matrix A = mat_init_cpu(x, y);
bin_matrix B = mat_init_cpu(y, x);
A->data = dataA;
B->data = dataB;
start = clock();
bin_matrix G1 = run_mult_kernel_debug(A, B);
end = clock();
time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
std::cout << "GPU V1 time: " << time_used << std::endl;
/*for(int i = 0; i < G1->rows; i++){
for(int j = 0; j < G1->cols; j++){
printf("%d:", G1->data[i * G1->cols + j]);
}
printf("\n");
}/**/
start = clock();
bin_matrix G2 = run_mult_kernel_test(A, B);
end = clock();
time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
std::cout << "GPU V2 time: " << time_used << std::endl;
for(int i = 0; i < G1->rows; i++){
for(int j = 0; j < G1->cols; j++){
if((G1->rows != G2->rows) || (G1->cols != G2->cols)){
if(G1->rows != G2->rows){
printf("Row size doesn't match.\n");
}
if(G1->cols != G2->cols){
printf("Col size doesn't match.\n");
}
matched = false;
break;
}
if(G1->data[i * G1->cols + j] != G2->data[i * G2->cols + j]){
printf("Index failed at: [%d,%d]\n", i, j);
matched = false;
i = 999999;
break;
}
}
}
/*for(int i = 0; i < G1->rows; i++){
for(int j = 0; j < G1->cols; j++){
printf("%d:", G1->data[i * G1->cols + j]);
printf("%d, ", G2->data[i * G1->cols + j]);
}
printf("\n");
}/**/
printf("Matched: %s\n", matched ? "true" : "false");
free(G1);
free(G2);
}
int main(int argc, char *argv[])
{
//wbArg_t args;
bin_matrix A;
bin_matrix B;
bin_matrix C;
int numRowsA;
int numColsA;
int numRowsB;
int numColsB;
int numRowsS;
int numColsS;
int x, y, z, upto, p;
HAMC_DATA_TYPE_t *hostA;
HAMC_DATA_TYPE_t *hostB;
HAMC_DATA_TYPE_t *sol;
char *input0;
char *input1;
char *expected;
bool cpu_exec = false;
bool trial_time = false;
bool sweep_tile_test = false;
bool debug_test = false;
bool gpu_profile = false;
bool gpu_V_test = false;
bool solved = true;
int opt;
while ((opt = getopt(argc, argv, "a:b:e:o:cts:p:gdx:y:z:h")) != -1){
switch(opt){
case 'a':
input0 = strdup(optarg);
break;
case 'b':
input1 = strdup(optarg);
break;
case 'e':
expected = strdup(optarg);
break;
case 'o':
//input0 = strdup(optarg);
break;
case 'c':
cpu_exec = true;
break;
case 't':
trial_time = true;
break;
case 's':
sweep_tile_test = true;
upto = atoi(optarg);
break;
case 'p':
gpu_profile = true;
p = atoi(optarg);
break;
case 'g':
gpu_V_test = true;
break;
case 'd':
debug_test = true;
break;
case 'x':
x = atoi(optarg);
break;
case 'y':
y = atoi(optarg);
break;
case 'z':
z = atoi(optarg);
break;
/*case 'u':
upto = atoi(optarg);
break;*/
case 'h':
default:
printHelp();
return 0;
}
}
if(trial_time){
run_time(x, y);
return 0;
}
if(sweep_tile_test){
run_tile_sweep(x, y, upto);
return 0;
}
if(gpu_profile){
if(p == 0) run_profile_og(x, y);
if(p == 1) run_profile_op(x, y);
return 0;
}
if(gpu_V_test){
run_gpu_vers(x, y, z);
return 0;
}
if(debug_test){
run_debug(x, y);
return 0;
}
/*float *floatTemp = (float *)wbImport(input0, &numRowsA, &numColsA);
hostA = (HAMC_DATA_TYPE_t *)malloc(numRowsA*numColsA * sizeof(HAMC_DATA_TYPE_t));
for(int i = 0; i < numColsA * numRowsA; i++){
hostA[i] = (HAMC_DATA_TYPE_t)floatTemp[i];
}
A = mat_init_cpu(numRowsA, numColsA);
A->data = hostA;
floatTemp = (float *)wbImport(input1, &numRowsB, &numColsB);
hostB = (HAMC_DATA_TYPE_t *)malloc(numRowsB*numColsB * sizeof(HAMC_DATA_TYPE_t));
for(int i = 0; i < numColsB * numRowsB; i++){
hostB[i] = (HAMC_DATA_TYPE_t)floatTemp[i];
}
B = mat_init_cpu(numRowsB, numColsB);
B->data = hostB;
floatTemp = (float *)wbImport(expected, &numRowsS, &numColsS);
sol = (HAMC_DATA_TYPE_t *)malloc(numRowsS*numColsS * sizeof(HAMC_DATA_TYPE_t));
for(int i = 0; i < numColsB * numRowsB; i++){
sol[i] = (HAMC_DATA_TYPE_t)floatTemp[i];
}
if(cpu_exec){
C = run_cpu(A, B);
}
else{
std::cout << "Running Kernel" << std::endl;
C = run_mult_kernel(A, B, 16);
}
if(C->rows != numRowsS && C->cols != numColsS){
solved = false;
}
else{
for(int i = 0; i < numRowsS * numColsS; i++){
if(C->data[i] != sol[i]){
solved = false;
break;
}
}
}
std::cout << "solved: " << solved << std::endl;
free(A);
free(B);
free(C);/**/
return 0;
}
| 3a2badbd9dc682c91347d394741bad0c9e43c45e.cu | #include <cuda_runtime.h>
#include <stdlib.h>
//#include <wb.h>
#include <stdint.h>
#include <ctype.h>
#include <stdio.h>
#include <unistd.h>
#include <iostream>
#include "../../hamc/hamc_cpu_code.c"
#include "../../hamc/MultiplyMatrixTesting.cu"
#define TILE_WIDTH 16
#define CUDA_CHECK(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess){
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
void printHelp()
{
printf("run this executable with the following flags\n");
printf("\n");
printf("\t-a <input0 file name>\n");
printf("\t-b <input1 file name>\n");
printf("\t-e <expected solution file name>\n");
printf("\t-o <output file name>\n");
printf("\t-c \n");
}
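// Example invocation (hypothetical binary name), comparing the two GPU kernel
// versions on random 1024x1024 inputs:
//   ./matrix_mult_test -g -x 1024 -y 1024 -z 1024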
bin_matrix run_cpu(bin_matrix A, bin_matrix B)
{
return matrix_mult_cpu(A, B);
}
void run_time(int x, int y)
{
clock_t start, end;
double time_used;
bool matched = true;
printf("Matrix dimension: %dX%d\n", x, y);
HAMC_DATA_TYPE_t *dataA = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
HAMC_DATA_TYPE_t *dataB = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
for(int i = 0; i < x * y; i++){
dataA[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
dataB[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
}
bin_matrix A = mat_init_cpu(x, y);
bin_matrix B = mat_init_cpu(y, x);
A->data = dataA;
B->data = dataB;
start = clock();
bin_matrix C = run_cpu(A, B);
end = clock();
time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
std::cout << "CPU time: " << time_used << std::endl;
start = clock();
bin_matrix G = run_mult_kernel_test(A, B);
end = clock();
time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
std::cout << "GPU time: " << time_used << std::endl;
for(int i = 0; i < C->rows * C->cols; i++){
if((C->rows != G->rows) || (C->cols != G->cols)){
if(C->rows != G->rows){
printf("Row size doesn't match.\n");
}
if(C->cols != G->cols){
printf("Col size doesn't match.\n");
}
matched = false;
break;
}
if(C->data[i] != G->data[i]){
printf("Index failed at: %d\n", i);
matched = false;
break;
}
}
printf("Matched: %s", matched ? "true" : "false");
free(C);
free(G);
}
void run_profile_og(int x, int y)
{
printf("Matrix size: %dX%d\n", x, y);
HAMC_DATA_TYPE_t *dataA = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
HAMC_DATA_TYPE_t *dataB = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
for(int i = 0; i < x * y; i++){
dataA[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
dataB[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
}
bin_matrix A = mat_init_cpu(x, y);
bin_matrix B = mat_init_cpu(y, x);
A->data = dataA;
B->data = dataB;
bin_matrix C = run_mult_kernel(A, B);
free(C);
}
void run_profile_op(int x, int y)
{
printf("Matrix size: %dX%d\n", x, y);
HAMC_DATA_TYPE_t *dataA = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
HAMC_DATA_TYPE_t *dataB = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
for(int i = 0; i < x * y; i++){
dataA[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
dataB[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
}
bin_matrix A = mat_init_cpu(x, y);
bin_matrix B = mat_init_cpu(y, x);
A->data = dataA;
B->data = dataB;
bin_matrix C = run_mult_kernel_test(A, B);
free(C);
}
void run_gpu_vers(int x, int y, int z)
{
clock_t start, end;
double time_used;
bool matched = true;
printf("Matrix dimensions: %dX%d, %dX%d\n", x, y, y, z);
HAMC_DATA_TYPE_t *dataA = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
HAMC_DATA_TYPE_t *dataB = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * y * z); // B is y x z: allocate y*z elements (x*y under-allocates when z > x)
for(int i = 0; i < x * y; i++){
dataA[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
}
for(int i = 0; i < z * y; i++){
dataB[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
}
bin_matrix A = mat_init_cpu(x, y);
bin_matrix B = mat_init_cpu(y, z);
A->data = dataA;
B->data = dataB;
start = clock();
bin_matrix G1 = run_mult_kernel(A, B);
end = clock();
time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
std::cout << "GPU V1 time: " << time_used << std::endl;
/*for(int i = 0; i < G1->rows; i++){
for(int j = 0; j < G1->cols; j++){
printf("%d:", G1->data[i * G1->cols + j]);
}
printf("\n");
}/**/
start = clock();
bin_matrix G2 = run_mult_kernel_test(A, B);
end = clock();
time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
std::cout << "GPU V2 time: " << time_used << std::endl;
for(int i = 0; i < G1->rows * G1->cols; i++){
if((G1->rows != G2->rows) || (G1->cols != G2->cols)){
if(G1->rows != G2->rows){
printf("Row size doesn't match.\n");
}
if(G1->cols != G2->cols){
printf("Col size doesn't match.\n");
}
matched = false;
break;
}
if(G1->data[i] != G2->data[i]){
printf("Index failed at: %d\n", i);
matched = false;
break;
}
}
for(int i = 0; i < G1->rows; i++){
for(int j = 0; j < G1->cols; j++){
printf("%d:", G1->data[i * G1->cols + j]);
printf("%d, ", G2->data[i * G1->cols + j]);
}
printf("\n");
}/**/
printf("Matched: %s", matched ? "true" : "false");
free(G1);
free(G2);
}
void run_tile_sweep(int x, int y, int upto)
{
clock_t start, end;
double time_used;
HAMC_DATA_TYPE_t *dataA = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
HAMC_DATA_TYPE_t *dataB = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
for(int i = 0; i < x * y; i++){
dataA[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
dataB[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
}
bin_matrix A = mat_init_cpu(x, y);
bin_matrix B = mat_init_cpu(y, x);
bin_matrix C = mat_init_cpu(x, y);
A->data = dataA;
B->data = dataB;
for(int i = 4; i <= upto; i *= 2){
start = clock();
//C = run_mult_kernel(A, B, i);
C = run_mult_kernel_test(A, B);
end = clock();
time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
std::cout << "GPU time: " << time_used << std::endl;
}
}
void run_size_sweep()
{
}
void run_debug(int x, int y)
{
clock_t start, end;
double time_used;
bool matched = true;
printf("Matrix dimensions: %dX%d\n", x, y);
HAMC_DATA_TYPE_t *dataA = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
HAMC_DATA_TYPE_t *dataB = (HAMC_DATA_TYPE_t *)malloc(sizeof(HAMC_DATA_TYPE_t) * x * y);
for(int i = 0; i < x * y; i++){
dataA[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
dataB[i] = (HAMC_DATA_TYPE_t)(rand() % 2);
}
bin_matrix A = mat_init_cpu(x, y);
bin_matrix B = mat_init_cpu(y, x);
A->data = dataA;
B->data = dataB;
start = clock();
bin_matrix G1 = run_mult_kernel_debug(A, B);
end = clock();
time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
std::cout << "GPU V1 time: " << time_used << std::endl;
/*for(int i = 0; i < G1->rows; i++){
for(int j = 0; j < G1->cols; j++){
printf("%d:", G1->data[i * G1->cols + j]);
}
printf("\n");
}/**/
start = clock();
bin_matrix G2 = run_mult_kernel_test(A, B);
end = clock();
time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
std::cout << "GPU V2 time: " << time_used << std::endl;
for(int i = 0; i < G1->rows; i++){
for(int j = 0; j < G1->cols; j++){
if((G1->rows != G2->rows) || (G1->cols != G2->cols)){
if(G1->rows != G2->rows){
printf("Row size doesn't match.\n");
}
if(G1->cols != G2->cols){
printf("Col size doesn't match.\n");
}
matched = false;
break;
}
if(G1->data[i * G1->cols + j] != G2->data[i * G2->cols + j]){
printf("Index failed at: [%d,%d]\n", i, j);
matched = false;
i = 999999;
break;
}
}
}
/*for(int i = 0; i < G1->rows; i++){
for(int j = 0; j < G1->cols; j++){
printf("%d:", G1->data[i * G1->cols + j]);
printf("%d, ", G2->data[i * G1->cols + j]);
}
printf("\n");
}/**/
printf("Matched: %s\n", matched ? "true" : "false");
free(G1);
free(G2);
}
int main(int argc, char *argv[])
{
//wbArg_t args;
bin_matrix A;
bin_matrix B;
bin_matrix C;
int numRowsA;
int numColsA;
int numRowsB;
int numColsB;
int numRowsS;
int numColsS;
int x, y, z, upto, p;
HAMC_DATA_TYPE_t *hostA;
HAMC_DATA_TYPE_t *hostB;
HAMC_DATA_TYPE_t *sol;
char *input0;
char *input1;
char *expected;
bool cpu_exec = false;
bool trial_time = false;
bool sweep_tile_test = false;
bool debug_test = false;
bool gpu_profile = false;
bool gpu_V_test = false;
bool solved = true;
int opt;
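    // Command-line flags: -x/-y/-z set the matrix dimensions, -a/-b/-e name the
    // input and expected-result files, -c runs the CPU path, -t times CPU vs GPU,
    // -s <n> sweeps tile sizes up to n, -p <0|1> profiles the original/optimized
    // kernel, -g compares the two GPU versions, -d runs the debug comparison,
    // and -h prints usage.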
while ((opt = getopt(argc, argv, "a:b:e:o:cts:p:gdx:y:z:h")) != -1){
switch(opt){
case 'a':
input0 = strdup(optarg);
break;
case 'b':
input1 = strdup(optarg);
break;
case 'e':
expected = strdup(optarg);
break;
case 'o':
//input0 = strdup(optarg);
break;
case 'c':
cpu_exec = true;
break;
case 't':
trial_time = true;
break;
case 's':
sweep_tile_test = true;
upto = atoi(optarg);
break;
case 'p':
gpu_profile = true;
p = atoi(optarg);
break;
case 'g':
gpu_V_test = true;
break;
case 'd':
debug_test = true;
break;
case 'x':
x = atoi(optarg);
break;
case 'y':
y = atoi(optarg);
break;
case 'z':
z = atoi(optarg);
break;
/*case 'u':
upto = atoi(optarg);
break;*/
case 'h':
default:
printHelp();
return 0;
}
}
if(trial_time){
run_time(x, y);
return 0;
}
if(sweep_tile_test){
run_tile_sweep(x, y, upto);
return 0;
}
if(gpu_profile){
if(p == 0) run_profile_og(x, y);
if(p == 1) run_profile_op(x, y);
return 0;
}
if(gpu_V_test){
run_gpu_vers(x, y, z);
return 0;
}
if(debug_test){
run_debug(x, y);
return 0;
}
/*float *floatTemp = (float *)wbImport(input0, &numRowsA, &numColsA);
hostA = (HAMC_DATA_TYPE_t *)malloc(numRowsA*numColsA * sizeof(HAMC_DATA_TYPE_t));
for(int i = 0; i < numColsA * numRowsA; i++){
hostA[i] = (HAMC_DATA_TYPE_t)floatTemp[i];
}
A = mat_init_cpu(numRowsA, numColsA);
A->data = hostA;
floatTemp = (float *)wbImport(input1, &numRowsB, &numColsB);
hostB = (HAMC_DATA_TYPE_t *)malloc(numRowsB*numColsB * sizeof(HAMC_DATA_TYPE_t));
for(int i = 0; i < numColsB * numRowsB; i++){
hostB[i] = (HAMC_DATA_TYPE_t)floatTemp[i];
}
B = mat_init_cpu(numRowsB, numColsB);
B->data = hostB;
floatTemp = (float *)wbImport(expected, &numRowsS, &numColsS);
sol = (HAMC_DATA_TYPE_t *)malloc(numRowsS*numColsS * sizeof(HAMC_DATA_TYPE_t));
for(int i = 0; i < numColsB * numRowsB; i++){
sol[i] = (HAMC_DATA_TYPE_t)floatTemp[i];
}
if(cpu_exec){
C = run_cpu(A, B);
}
else{
std::cout << "Running Kernel" << std::endl;
C = run_mult_kernel(A, B, 16);
}
if(C->rows != numRowsS && C->cols != numColsS){
solved = false;
}
else{
for(int i = 0; i < numRowsS * numColsS; i++){
if(C->data[i] != sol[i]){
solved = false;
break;
}
}
}
std::cout << "solved: " << solved << std::endl;
free(A);
free(B);
free(C);/**/
return 0;
}
|
b3c09879b72a9d15b8aeab3a5d367ecf2f8fbdac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "modules/perception/inference/utils/resize.h"
#include <algorithm>
#include "cyber/common/log.h"
#include "modules/perception/inference/utils/util.h"
#include "modules/perception/inference/utils/cuda_util.h"
namespace apollo {
namespace perception {
namespace inference {
__global__ void resize_linear_kernel(const unsigned char *src,
float *dst,
int channel,
int height,
int width,
int stepwidth,
int dst_height,
int dst_width,
float fx,
float fy) {
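  // One thread per destination pixel: map (dst_x, dst_y) back into the source
  // image and bilinearly interpolate the four surrounding source pixels.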
  const int dst_x = blockDim.x * blockIdx.x + threadIdx.x;  // grid has (dst_width/32) * (dst_height/8) blocks
  const int dst_y = blockDim.y * blockIdx.y + threadIdx.y;  // index of the current thread; 32*8 = 256 threads per block
if (dst_x < dst_width && dst_y < dst_height) {
float src_x = (dst_x + 0.5) * fx - 0.5;//fx = (origin_width) / (width);
float src_y = (dst_y + 0.5) * fy - 0.5;
const int x1 = __float2int_rd(src_x);
const int y1 = __float2int_rd(src_y);
const int x1_read = max(x1, 0);
const int y1_read = max(y1, 0);
const int x2 = x1 + 1;
const int y2 = y1 + 1;
const int x2_read = min(x2, width - 1);
const int y2_read = min(y2, height - 1);
int src_reg = 0;
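    // Bilinear blend of the pixels at (x1,y1), (x2,y1), (x1,y2) and (x2,y2),
    // weighted by the fractional distances of (src_x, src_y) from each corner.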
for (int c = 0; c < channel; c++) {
float out = 0;
int idx11 = (y1_read * stepwidth + x1_read) * channel;
src_reg = src[idx11 + c];
out = out + (x2 - src_x) * (y2 - src_y) * src_reg;
int idx12 = (y1_read * stepwidth + x2_read) * channel;
src_reg = src[idx12 + c];
out = out + src_reg * (src_x - x1) * (y2 - src_y);
int idx21 = (y2_read * stepwidth + x1_read) * channel;
src_reg = src[idx21 + c];
out = out + src_reg * (x2 - src_x) * (src_y - y1);
int idx22 = (y2_read * stepwidth + x2_read) * channel;
src_reg = src[idx22 + c];
out = out + src_reg * (src_x - x1) * (src_y - y1);
if (out < 0) {
out = 0;
}
if (out > 255) {
out = 255;
}
int dst_idx = (dst_y * dst_width + dst_x) * channel + c;
dst[dst_idx] = out;
}
}
}
int divup(int a, int b) {
if (a % b) {
return a / b + 1;
} else {
return a / b;
}
}
template<typename T>
__global__ void resize_linear_kernel_mean(const unsigned char *src,
float *dst,
int channel,
int height,
int width,
int stepwidth,
int dst_height,
int dst_width,
float fx,
float fy,
T mean_b,
T mean_g,
T mean_r,
bool channel_axis,
float scale) {
const int dst_x = blockDim.x * blockIdx.x + threadIdx.x;
const int dst_y = blockDim.y * blockIdx.y + threadIdx.y;
if (dst_x < dst_width && dst_y < dst_height) {
float src_x = (dst_x + 0.5) * fx - 0.5;
float src_y = (dst_y + 0.5) * fy - 0.5;
const int x1 = __float2int_rd(src_x);
const int y1 = __float2int_rd(src_y);
const int x1_read = max(x1, 0);
const int y1_read = max(y1, 0);
const int x2 = x1 + 1;
const int y2 = y1 + 1;
const int x2_read = min(x2, width - 1);
const int y2_read = min(y2, height - 1);
// (h*width+w)*channel+c
int src_reg = 0;
for (int c = 0; c < channel; c++) {
float out = 0;
int idx11 = (y1_read * stepwidth + x1_read) * channel;
src_reg = src[idx11 + c];
out = out + (x2 - src_x) * (y2 - src_y) * src_reg;
int idx12 = (y1_read * stepwidth + x2_read) * channel;
src_reg = src[idx12 + c];
out = out + src_reg * (src_x - x1) * (y2 - src_y);
int idx21 = (y2_read * stepwidth + x1_read) * channel;
src_reg = src[idx21 + c];
out = out + src_reg * (x2 - src_x) * (src_y - y1);
int idx22 = (y2_read * stepwidth + x2_read) * channel;
src_reg = src[idx22 + c];
out = out + src_reg * (src_x - x1) * (src_y - y1);
if (out < 0) {
out = 0;
}
if (out > 255) {
out = 255;
}
int dst_idx;
if (channel_axis) {
dst_idx = (dst_y * dst_width + dst_x) * channel + c;
} else {
dst_idx = (c * dst_height + dst_y) * dst_width + dst_x;
}
// printf("%f %d %d %d %d %d %d %d\n",out,x1,y1,x2,y2,c,dst_y,dst_x);
// dst[dst_idx] = (out - mean[c]) * scale;
if (c == 0) {
dst[dst_idx] = (out - mean_b) * scale;
} else if (c == 1) {
dst[dst_idx] = (out - mean_g) * scale;
} else if (c == 2) {
dst[dst_idx] = (out - mean_r) * scale;
}
}
}
}
bool ResizeGPU(const base::Image8U &src,
std::shared_ptr<apollo::perception::base::Blob<float> > dst,
int stepwidth,
int start_axis) {
int width = dst->shape(2);
int height = dst->shape(1);
int channel = dst->shape(3);
int origin_channel = src.channels();
int origin_height = src.rows();
int origin_width = src.cols();
if (origin_channel != dst->shape(3)) {
AERROR << "channel should be the same after resize.";
return false;
}
float fx = static_cast<float>(origin_width) / static_cast<float>(width);
float fy = static_cast<float>(origin_height) / static_cast<float>(height);
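  // fx and fy scale destination pixel coordinates back to source coordinates
  // (source size divided by destination size).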
  const dim3 block(32, 8);  // CUDA thread organization: block and grid
  const dim3 grid(divup(width, block.x), divup(height, block.y));  // block.x = 32, so the grid is (width/32, height/8), rounded up
resize_linear_kernel << < grid, block >> >
(src.gpu_data(), dst->mutable_gpu_data(),
origin_channel, origin_height, origin_width,
      stepwidth, height, width, fx, fy); // launch the kernel
return true;
}
bool ResizeGPU(const apollo::perception::base::Blob<uint8_t> &src_gpu,
std::shared_ptr<apollo::perception::base::Blob<float> > dst,
int stepwidth,
int start_axis,
int mean_b,
int mean_g,
int mean_r,
bool channel_axis,
float scale) {
int width = dst->shape(2);
int height = dst->shape(1);
int channel = dst->shape(3);
int origin_channel = src_gpu.shape(3);
int origin_height = src_gpu.shape(1);
int origin_width = src_gpu.shape(2);
if (!channel_axis) {
// channel_axis: false
// SRC: 1 H W C
// DST: 1 C H W
width = dst->shape(3);
height = dst->shape(2);
channel = dst->shape(1);
}
// channel_axis: true
// SRC: 1 H W C
// DST: 1 H W C
if (origin_channel != channel) {
AERROR << "channel should be the same after resize.";
return false;
}
float fx = static_cast<float>(origin_width) / static_cast<float>(width);
float fy = static_cast<float>(origin_height) / static_cast<float>(height);
const dim3 block(32, 8);
const dim3 grid(divup(width, block.x), divup(height, block.y));
resize_linear_kernel_mean << < grid, block >> >
((const unsigned char *) src_gpu.gpu_data(),
dst->mutable_gpu_data() + dst->offset(start_axis),
origin_channel, origin_height, origin_width,
stepwidth, height, width, fx, fy, mean_b, mean_g, mean_r,
channel_axis, scale);
return true;
}
bool ResizeGPU(const base::Image8U &src,
std::shared_ptr<apollo::perception::base::Blob<float> > dst,
int stepwidth,
int start_axis,
float mean_b,
float mean_g,
float mean_r,
bool channel_axis,
float scale) {
int width = dst->shape(2);
int height = dst->shape(1);
int channel = dst->shape(3);
int origin_channel = src.channels();
int origin_height = src.rows();
int origin_width = src.cols();
if (!channel_axis) {
// channel_axis: false
// SRC: 1 H W C
// DST: 1 C H W
width = dst->shape(3);
height = dst->shape(2);
channel = dst->shape(1);
}
// channel_axis: true
// SRC: 1 H W C
// DST: 1 H W C
if (origin_channel != channel) {
AERROR << "channel should be the same after resize.";
return false;
}
float fx = static_cast<float>(origin_width) / static_cast<float>(width);
float fy = static_cast<float>(origin_height) / static_cast<float>(height);
const dim3 block(32, 8);
const dim3 grid(divup(width, block.x), divup(height, block.y));
resize_linear_kernel_mean << < grid, block >> >
(src.gpu_data(), dst->mutable_gpu_data() + dst->offset(start_axis),
origin_channel, origin_height, origin_width,
stepwidth, height, width, fx, fy, mean_b, mean_g, mean_r,
channel_axis, scale);
return true;
}
} // namespace inference
} // namespace perception
} // namespace apollo
| b3c09879b72a9d15b8aeab3a5d367ecf2f8fbdac.cu | /******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "modules/perception/inference/utils/resize.h"
#include <algorithm>
#include "cyber/common/log.h"
#include "modules/perception/inference/utils/util.h"
#include "modules/perception/inference/utils/cuda_util.h"
namespace apollo {
namespace perception {
namespace inference {
__global__ void resize_linear_kernel(const unsigned char *src,
float *dst,
int channel,
int height,
int width,
int stepwidth,
int dst_height,
int dst_width,
float fx,
float fy) {
  const int dst_x = blockDim.x * blockIdx.x + threadIdx.x;  // grid has (dst_width/32) * (dst_height/8) blocks
  const int dst_y = blockDim.y * blockIdx.y + threadIdx.y;  // index of the current thread; 32*8 = 256 threads per block
if (dst_x < dst_width && dst_y < dst_height) {
float src_x = (dst_x + 0.5) * fx - 0.5;//fx = (origin_width) / (width);
float src_y = (dst_y + 0.5) * fy - 0.5;
const int x1 = __float2int_rd(src_x);
const int y1 = __float2int_rd(src_y);
const int x1_read = max(x1, 0);
const int y1_read = max(y1, 0);
const int x2 = x1 + 1;
const int y2 = y1 + 1;
const int x2_read = min(x2, width - 1);
const int y2_read = min(y2, height - 1);
int src_reg = 0;
for (int c = 0; c < channel; c++) {
float out = 0;
int idx11 = (y1_read * stepwidth + x1_read) * channel;
src_reg = src[idx11 + c];
out = out + (x2 - src_x) * (y2 - src_y) * src_reg;
int idx12 = (y1_read * stepwidth + x2_read) * channel;
src_reg = src[idx12 + c];
out = out + src_reg * (src_x - x1) * (y2 - src_y);
int idx21 = (y2_read * stepwidth + x1_read) * channel;
src_reg = src[idx21 + c];
out = out + src_reg * (x2 - src_x) * (src_y - y1);
int idx22 = (y2_read * stepwidth + x2_read) * channel;
src_reg = src[idx22 + c];
out = out + src_reg * (src_x - x1) * (src_y - y1);
if (out < 0) {
out = 0;
}
if (out > 255) {
out = 255;
}
int dst_idx = (dst_y * dst_width + dst_x) * channel + c;
dst[dst_idx] = out;
}
}
}
int divup(int a, int b) {
if (a % b) {
return a / b + 1;
} else {
return a / b;
}
}
template<typename T>
__global__ void resize_linear_kernel_mean(const unsigned char *src,
float *dst,
int channel,
int height,
int width,
int stepwidth,
int dst_height,
int dst_width,
float fx,
float fy,
T mean_b,
T mean_g,
T mean_r,
bool channel_axis,
float scale) {
const int dst_x = blockDim.x * blockIdx.x + threadIdx.x;
const int dst_y = blockDim.y * blockIdx.y + threadIdx.y;
if (dst_x < dst_width && dst_y < dst_height) {
float src_x = (dst_x + 0.5) * fx - 0.5;
float src_y = (dst_y + 0.5) * fy - 0.5;
const int x1 = __float2int_rd(src_x);
const int y1 = __float2int_rd(src_y);
const int x1_read = max(x1, 0);
const int y1_read = max(y1, 0);
const int x2 = x1 + 1;
const int y2 = y1 + 1;
const int x2_read = min(x2, width - 1);
const int y2_read = min(y2, height - 1);
// (h*width+w)*channel+c
int src_reg = 0;
for (int c = 0; c < channel; c++) {
float out = 0;
int idx11 = (y1_read * stepwidth + x1_read) * channel;
src_reg = src[idx11 + c];
out = out + (x2 - src_x) * (y2 - src_y) * src_reg;
int idx12 = (y1_read * stepwidth + x2_read) * channel;
src_reg = src[idx12 + c];
out = out + src_reg * (src_x - x1) * (y2 - src_y);
int idx21 = (y2_read * stepwidth + x1_read) * channel;
src_reg = src[idx21 + c];
out = out + src_reg * (x2 - src_x) * (src_y - y1);
int idx22 = (y2_read * stepwidth + x2_read) * channel;
src_reg = src[idx22 + c];
out = out + src_reg * (src_x - x1) * (src_y - y1);
if (out < 0) {
out = 0;
}
if (out > 255) {
out = 255;
}
int dst_idx;
if (channel_axis) {
dst_idx = (dst_y * dst_width + dst_x) * channel + c;
} else {
dst_idx = (c * dst_height + dst_y) * dst_width + dst_x;
}
// printf("%f %d %d %d %d %d %d %d\n",out,x1,y1,x2,y2,c,dst_y,dst_x);
// dst[dst_idx] = (out - mean[c]) * scale;
if (c == 0) {
dst[dst_idx] = (out - mean_b) * scale;
} else if (c == 1) {
dst[dst_idx] = (out - mean_g) * scale;
} else if (c == 2) {
dst[dst_idx] = (out - mean_r) * scale;
}
}
}
}
bool ResizeGPU(const base::Image8U &src,
std::shared_ptr<apollo::perception::base::Blob<float> > dst,
int stepwidth,
int start_axis) {
int width = dst->shape(2);
int height = dst->shape(1);
int channel = dst->shape(3);
int origin_channel = src.channels();
int origin_height = src.rows();
int origin_width = src.cols();
if (origin_channel != dst->shape(3)) {
AERROR << "channel should be the same after resize.";
return false;
}
float fx = static_cast<float>(origin_width) / static_cast<float>(width);
float fy = static_cast<float>(origin_height) / static_cast<float>(height);
  const dim3 block(32, 8);  // CUDA thread organization: block and grid
  const dim3 grid(divup(width, block.x), divup(height, block.y));  // block.x = 32, so the grid is (width/32, height/8), rounded up
resize_linear_kernel << < grid, block >> >
(src.gpu_data(), dst->mutable_gpu_data(),
origin_channel, origin_height, origin_width,
      stepwidth, height, width, fx, fy); // launch the kernel
return true;
}
bool ResizeGPU(const apollo::perception::base::Blob<uint8_t> &src_gpu,
std::shared_ptr<apollo::perception::base::Blob<float> > dst,
int stepwidth,
int start_axis,
int mean_b,
int mean_g,
int mean_r,
bool channel_axis,
float scale) {
int width = dst->shape(2);
int height = dst->shape(1);
int channel = dst->shape(3);
int origin_channel = src_gpu.shape(3);
int origin_height = src_gpu.shape(1);
int origin_width = src_gpu.shape(2);
if (!channel_axis) {
// channel_axis: false
// SRC: 1 H W C
// DST: 1 C H W
width = dst->shape(3);
height = dst->shape(2);
channel = dst->shape(1);
}
// channel_axis: true
// SRC: 1 H W C
// DST: 1 H W C
if (origin_channel != channel) {
AERROR << "channel should be the same after resize.";
return false;
}
float fx = static_cast<float>(origin_width) / static_cast<float>(width);
float fy = static_cast<float>(origin_height) / static_cast<float>(height);
const dim3 block(32, 8);
const dim3 grid(divup(width, block.x), divup(height, block.y));
resize_linear_kernel_mean << < grid, block >> >
((const unsigned char *) src_gpu.gpu_data(),
dst->mutable_gpu_data() + dst->offset(start_axis),
origin_channel, origin_height, origin_width,
stepwidth, height, width, fx, fy, mean_b, mean_g, mean_r,
channel_axis, scale);
return true;
}
bool ResizeGPU(const base::Image8U &src,
std::shared_ptr<apollo::perception::base::Blob<float> > dst,
int stepwidth,
int start_axis,
float mean_b,
float mean_g,
float mean_r,
bool channel_axis,
float scale) {
int width = dst->shape(2);
int height = dst->shape(1);
int channel = dst->shape(3);
int origin_channel = src.channels();
int origin_height = src.rows();
int origin_width = src.cols();
if (!channel_axis) {
// channel_axis: false
// SRC: 1 H W C
// DST: 1 C H W
width = dst->shape(3);
height = dst->shape(2);
channel = dst->shape(1);
}
// channel_axis: true
// SRC: 1 H W C
// DST: 1 H W C
if (origin_channel != channel) {
AERROR << "channel should be the same after resize.";
return false;
}
float fx = static_cast<float>(origin_width) / static_cast<float>(width);
float fy = static_cast<float>(origin_height) / static_cast<float>(height);
const dim3 block(32, 8);
const dim3 grid(divup(width, block.x), divup(height, block.y));
resize_linear_kernel_mean << < grid, block >> >
(src.gpu_data(), dst->mutable_gpu_data() + dst->offset(start_axis),
origin_channel, origin_height, origin_width,
stepwidth, height, width, fx, fy, mean_b, mean_g, mean_r,
channel_axis, scale);
return true;
}
} // namespace inference
} // namespace perception
} // namespace apollo
|
67163f734b8f1ecc00d73f68022dc54556981c10.hip | // !!! This is a file automatically generated by hipify!!!
// File: force.cu
// C/Fortran interface to GPU port of force.F.
// includes standard headers
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// includes cuda headers
#include <hip/hip_runtime.h>
// includes project headers
#include "cuda_globals.h"
#include "Operator.h"
#include "cuda_helpers.h"
/******************************************************/
// CUDA kernels/wrappers used in FORLOC and FORHAR
// interpolates the pseudopotential on the grid of reciprocal lattice vectors
__global__ void cuforlocg(int np, int nrow, int npspts, char ngihalf, double tpi, double argsc,
double psgma2, double zz, double omega, int *i2, int *i3, int *lpctx, int *lpcty,
int *lpctz, double *lattb, double *psp, double *work)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// for each thread,
if( idx < np)
{
// fetch indices
int n1 = idx % nrow;
int nc = idx / nrow;
int n2 = i2[nc]-1; // c indexing
int n3 = i3[nc]-1;
// fetch loop counters
double lpx = lpctx[n1];
double lpy = lpcty[n2];
    double lpz = lpctz[n3];
// calculate magnitude of reciprocal lattice vector
double gx = lpx*lattb[0]+lpy*lattb[3]+lpz*lattb[6];
double gy = lpx*lattb[1]+lpy*lattb[4]+lpz*lattb[7];
double gz = lpx*lattb[2]+lpy*lattb[5]+lpz*lattb[8];
double g = sqrt(gx*gx+gy*gy+gz*gz)*tpi;
if(g!=0 && g<psgma2)
{
// convert mag. (g) to a position in charge density array (prho)
int i = (int)(g*argsc); // c indexing
double rem = g-psp[i];
// interpolate pseudopotential and its derivative
double vpst = psp[i+npspts]+rem*(psp[i+npspts*2]+rem*(psp[i+npspts*3] + rem*psp[i+npspts*4]));
work[idx]=(vpst+zz/(g*g))/omega;
}
else
work[idx]=0;
}
}
// interpolates the pseudopotential on the grid of reciprocal lattice vectors
extern "C"
void cuda_forlocg_(int *np, int *nrow, int *npspts, char *ngihalf, double *tpi, double *argsc,
double *psgma2, double *zz, double *omega, devptr_t *i2, devptr_t *i3, devptr_t *lpctx,
devptr_t *lpcty, devptr_t *lpctz, devptr_t *devptr_lattb, devptr_t *devptr_psp,
devptr_t *devptr_work)
{
// grid dimensions
int N = *np;
dim3 block(MAX_THREADS);
dim3 grid((N+block.x-1)/block.x);
// device pointers
double *lattb = (double *)(*devptr_lattb);
double *psp = (double *)(*devptr_psp);
double *work = (double *)(*devptr_work);
// interpolate pseudopotential
hipLaunchKernelGGL(( cuforlocg), dim3(grid),dim3(block), 0, 0, *np,*nrow,*npspts,*ngihalf,*tpi,*argsc,*psgma2,*zz,*omega,
(int*)*i2,(int*)*i3,(int*)*lpctx,(int*)*lpcty,(int*)*lpctz,lattb,psp,work);
CUDA_ERROR( hipDeviceSynchronize(), "Failed to execute CUDA kernel cuforlocg!" );
}
// interpolates the pseudopotential on the grid of reciprocal lattice vectors
__global__ void cuforharg(int np, int nrow, char ngihalf, double tpi, double argsc,double psgma2,
int *i2, int *i3, int *lpctx, int *lpcty, int *lpctz, double *lattb,
double *prho, double *work)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// for each thread,
if( idx < np)
{
// fetch indices
int n1 = idx % nrow;
int nc = idx / nrow;
int n2 = i2[nc]-1; // c indexing
int n3 = i3[nc]-1;
// fetch loop counters
double lpx = lpctx[n1];
double lpy = lpcty[n2];
    double lpz = lpctz[n3];
// calculate magnitude of reciprocal lattice vector
double gx = lpx*lattb[0]+lpy*lattb[3]+lpz*lattb[6];
double gy = lpx*lattb[1]+lpy*lattb[4]+lpz*lattb[7];
double gz = lpx*lattb[2]+lpy*lattb[5]+lpz*lattb[8];
double g = sqrt(gx*gx+gy*gy+gz*gz)*tpi;
if(g!=0 && g<psgma2)
{
// convert mag. (g) to a position in charge density array (prho)
double arg = g*argsc+1;
int naddr = (arg>2) ? (int)arg : 2;
double rem = arg-naddr;
naddr-=1; // c indexing
// fetch atomic charge density
double v1=prho[naddr-1];
double v2=prho[naddr];
double v3=prho[naddr+1];
double v4=prho[naddr+2];
// interpolate atomic charge density
double t0=v2;
double t1=((6*v3)-(2*v1)-(3*v2)-v4)/6.0;
double t2=(v1+v3-(2*v2))/2.0;
      double t3=(v4-v1+(3*(v2-v3)))/6.0;
work[idx]=t0+rem*(t1+rem*(t2+rem*t3));
}
else
work[idx]=0;
}
}
// interpolates the pseudopotential on the grid of reciprocal lattice vectors
extern "C"
void cuda_forharg_(const int *np, const int *nrow, char *ngihalf, double *tpi, double *argsc,
double *psgma2, devptr_t *i2, devptr_t *i3, devptr_t *lpctx, devptr_t *lpcty, devptr_t *lpctz,
devptr_t *devptr_lattb, devptr_t *devptr_prho, devptr_t *devptr_work)
{
// grid dimensions
int N = *np;
dim3 block(MAX_THREADS);
dim3 grid((N+block.x-1)/block.x);
// device pointers
double *lattb = (double *)(*devptr_lattb);
double *prho = (double *)(*devptr_prho);
double *work = (double *)(*devptr_work);
// interpolate pseudopotential
hipLaunchKernelGGL(( cuforharg), dim3(grid),dim3(block), 0, 0, *np,*nrow,*ngihalf,*tpi,*argsc,*psgma2,(int*)*i2,(int*)*i3,
(int*)*lpctx,(int*)*lpcty,(int*)*lpctz,lattb,prho,work);
CUDA_ERROR( hipDeviceSynchronize(), "Failed to execute CUDA kernel cucalcg!" );
}
// calculate the total force on the ions
__global__ void cucalcf(int np, int nrow, char ngihalf, double poisonx, double poisony,
double poisonz, hipDoubleComplex citpi, double vca, int *i2, int *i3,
int *lpctx, int *lpcty, int *lpctz, int *lpctx_, int *lpcty_, int *lpctz_,
hipDoubleComplex *ch, double *work, double *f1, double *f2, double *f3)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// for each thread,
if( idx < np)
{
// fetch indices
int n1 = idx % nrow;
int nc = idx / nrow;
int n2 = i2[nc]-1; // c indexing
int n3 = i3[nc]-1;
// calculate phase factor b
double g = poisonx*lpctx[n1] + poisony*lpctx[n2] + poisonz*lpctx[n3];
hipDoubleComplex b = cexp(citpi*g)*vca;
// calculate force contribution for a
// single reciprocal lattice vector
hipDoubleComplex a = ch[idx];
double f0 = work[idx]*(a.x*b.y-a.y*b.x);
// scale force contribution
if(ngihalf=='z' && n3)
f0*=2;
else if(ngihalf=='x' && n1)
f0*=2;
// add the contribution to the force
f1[idx]=-lpctx_[n1]*f0; // FOR1
f2[idx]=-lpcty_[n2]*f0; // FOR2
f3[idx]=-lpctz_[n3]*f0; // FOR3
}
}
// calculate the total force on the ions
extern "C"
void cuda_calcf_(const int *np, const int *nrow, char *ngihalf, double *poison,
hipDoubleComplex *citpi, double *vca, devptr_t *i2, devptr_t *i3,
devptr_t *lpctx, devptr_t *lpcty, devptr_t *lpctz,
devptr_t *lpctx_, devptr_t *lpcty_, devptr_t *lpctz_,
devptr_t *devptr_f1, devptr_t *devptr_f2, devptr_t *devptr_f3,
devptr_t *devptr_ch, devptr_t *devptr_work, double *force)
{
// grid dimensions
int N = *np;
dim3 block(MAX_THREADS);
dim3 grid((N+block.x-1)/block.x);
// size of shared memory buffer
int ssize = 3*block.x*sizeof(double);
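  // Three doubles per thread: one partial sum for each Cartesian component
  // of the force (f1, f2, f3).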
// device pointers
hipDoubleComplex *ch = (hipDoubleComplex *)(*devptr_ch);
double *work = (double *)(*devptr_work);
double *f1 = (double *)(*devptr_f1);
double *f2 = (double *)(*devptr_f2);
double *f3 = (double *)(*devptr_f3);
// calculate force contributions
hipLaunchKernelGGL(( cucalcf), dim3(grid),dim3(block), 0, 0, N,*nrow,*ngihalf,poison[0],poison[1],poison[2],*citpi,*vca,
(int*)*i2,(int*)*i3,(int*)*lpctx,(int*)*lpcty,(int*)*lpctz,(int*)*lpctx_,
(int*)*lpcty_,(int*)*lpctz_,ch,work,f1,f2,f3);
// calculate total force by summing
// over reciprocal lattice vectors
hipLaunchKernelGGL(( cureducesum3), dim3(grid),dim3(block),ssize, 0, f1,f2,f3,N);
hipLaunchKernelGGL(( cureducesum3_1block), dim3(1),dim3(block),ssize, 0, f1,f2,f3,grid.x);
// copy sums from device to host
CUDA_ERROR( hipMemcpy(&force[0],f1,sizeof(double),hipMemcpyDeviceToHost),
"Failed to copy from device to host in cuda_calcharfor!");
CUDA_ERROR( hipMemcpy(&force[1],f2,sizeof(double),hipMemcpyDeviceToHost),
"Failed to copy from device to host in cuda_calcharfor!");
CUDA_ERROR( hipMemcpy(&force[2],f3,sizeof(double),hipMemcpyDeviceToHost),
"Failed to copy from device to host in cuda_calcharfor");
}
/******************************************************/
| 67163f734b8f1ecc00d73f68022dc54556981c10.cu | // File: force.cu
// C/Fortran interface to GPU port of force.F.
// includes standard headers
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// includes cuda headers
#include <cuda_runtime.h>
// includes project headers
#include "cuda_globals.h"
#include "Operator.h"
#include "cuda_helpers.h"
/******************************************************/
// CUDA kernels/wrappers used in FORLOC and FORHAR
// interpolates the pseudopotential on the grid of reciprocal lattice vectors
__global__ void cuforlocg(int np, int nrow, int npspts, char ngihalf, double tpi, double argsc,
double psgma2, double zz, double omega, int *i2, int *i3, int *lpctx, int *lpcty,
int *lpctz, double *lattb, double *psp, double *work)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// for each thread,
if( idx < np)
{
// fetch indices
int n1 = idx % nrow;
int nc = idx / nrow;
int n2 = i2[nc]-1; // c indexing
int n3 = i3[nc]-1;
// fetch loop counters
double lpx = lpctx[n1];
double lpy = lpcty[n2];
    double lpz = lpctz[n3];
// calculate magnitude of reciprocal lattice vector
double gx = lpx*lattb[0]+lpy*lattb[3]+lpz*lattb[6];
double gy = lpx*lattb[1]+lpy*lattb[4]+lpz*lattb[7];
double gz = lpx*lattb[2]+lpy*lattb[5]+lpz*lattb[8];
double g = sqrt(gx*gx+gy*gy+gz*gz)*tpi;
if(g!=0 && g<psgma2)
{
// convert mag. (g) to a position in charge density array (prho)
int i = (int)(g*argsc); // c indexing
double rem = g-psp[i];
// interpolate pseudopotential and its derivative
double vpst = psp[i+npspts]+rem*(psp[i+npspts*2]+rem*(psp[i+npspts*3] + rem*psp[i+npspts*4]));
work[idx]=(vpst+zz/(g*g))/omega;
}
else
work[idx]=0;
}
}
// interpolates the pseudopotential on the grid of reciprocal lattice vectors
extern "C"
void cuda_forlocg_(int *np, int *nrow, int *npspts, char *ngihalf, double *tpi, double *argsc,
double *psgma2, double *zz, double *omega, devptr_t *i2, devptr_t *i3, devptr_t *lpctx,
devptr_t *lpcty, devptr_t *lpctz, devptr_t *devptr_lattb, devptr_t *devptr_psp,
devptr_t *devptr_work)
{
// grid dimensions
int N = *np;
dim3 block(MAX_THREADS);
dim3 grid((N+block.x-1)/block.x);
// device pointers
double *lattb = (double *)(*devptr_lattb);
double *psp = (double *)(*devptr_psp);
double *work = (double *)(*devptr_work);
// interpolate pseudopotential
cuforlocg<<<grid,block>>>(*np,*nrow,*npspts,*ngihalf,*tpi,*argsc,*psgma2,*zz,*omega,
(int*)*i2,(int*)*i3,(int*)*lpctx,(int*)*lpcty,(int*)*lpctz,lattb,psp,work);
CUDA_ERROR( cudaDeviceSynchronize(), "Failed to execute CUDA kernel cuforlocg!" );
}
// interpolates the pseudopotential on the grid of reciprocal lattice vectors
__global__ void cuforharg(int np, int nrow, char ngihalf, double tpi, double argsc,double psgma2,
int *i2, int *i3, int *lpctx, int *lpcty, int *lpctz, double *lattb,
double *prho, double *work)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// for each thread,
if( idx < np)
{
// fetch indices
int n1 = idx % nrow;
int nc = idx / nrow;
int n2 = i2[nc]-1; // c indexing
int n3 = i3[nc]-1;
// fetch loop counters
double lpx = lpctx[n1];
double lpy = lpcty[n2];
    double lpz = lpctz[n3];
// calculate magnitude of reciprocal lattice vector
double gx = lpx*lattb[0]+lpy*lattb[3]+lpz*lattb[6];
double gy = lpx*lattb[1]+lpy*lattb[4]+lpz*lattb[7];
double gz = lpx*lattb[2]+lpy*lattb[5]+lpz*lattb[8];
double g = sqrt(gx*gx+gy*gy+gz*gz)*tpi;
if(g!=0 && g<psgma2)
{
// convert mag. (g) to a position in charge density array (prho)
double arg = g*argsc+1;
int naddr = (arg>2) ? (int)arg : 2;
double rem = arg-naddr;
naddr-=1; // c indexing
// fetch atomic charge density
double v1=prho[naddr-1];
double v2=prho[naddr];
double v3=prho[naddr+1];
double v4=prho[naddr+2];
// interpolate atomic charge density
double t0=v2;
double t1=((6*v3)-(2*v1)-(3*v2)-v4)/6.0;
double t2=(v1+v3-(2*v2))/2.0;
      double t3=(v4-v1+(3*(v2-v3)))/6.0;
work[idx]=t0+rem*(t1+rem*(t2+rem*t3));
}
else
work[idx]=0;
}
}
// interpolates the pseudopotential on the grid of reciprocal lattice vectors
extern "C"
void cuda_forharg_(const int *np, const int *nrow, char *ngihalf, double *tpi, double *argsc,
double *psgma2, devptr_t *i2, devptr_t *i3, devptr_t *lpctx, devptr_t *lpcty, devptr_t *lpctz,
devptr_t *devptr_lattb, devptr_t *devptr_prho, devptr_t *devptr_work)
{
// grid dimensions
int N = *np;
dim3 block(MAX_THREADS);
dim3 grid((N+block.x-1)/block.x);
// device pointers
double *lattb = (double *)(*devptr_lattb);
double *prho = (double *)(*devptr_prho);
double *work = (double *)(*devptr_work);
// interpolate pseudopotential
cuforharg<<<grid,block>>>(*np,*nrow,*ngihalf,*tpi,*argsc,*psgma2,(int*)*i2,(int*)*i3,
(int*)*lpctx,(int*)*lpcty,(int*)*lpctz,lattb,prho,work);
CUDA_ERROR( cudaDeviceSynchronize(), "Failed to execute CUDA kernel cucalcg!" );
}
// calculate the total force on the ions
__global__ void cucalcf(int np, int nrow, char ngihalf, double poisonx, double poisony,
double poisonz, cuDoubleComplex citpi, double vca, int *i2, int *i3,
int *lpctx, int *lpcty, int *lpctz, int *lpctx_, int *lpcty_, int *lpctz_,
cuDoubleComplex *ch, double *work, double *f1, double *f2, double *f3)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// for each thread,
if( idx < np)
{
// fetch indices
int n1 = idx % nrow;
int nc = idx / nrow;
int n2 = i2[nc]-1; // c indexing
int n3 = i3[nc]-1;
// calculate phase factor b
double g = poisonx*lpctx[n1] + poisony*lpctx[n2] + poisonz*lpctx[n3];
cuDoubleComplex b = cexp(citpi*g)*vca;
// calculate force contribution for a
// single reciprocal lattice vector
cuDoubleComplex a = ch[idx];
double f0 = work[idx]*(a.x*b.y-a.y*b.x);
// scale force contribution
if(ngihalf=='z' && n3)
f0*=2;
else if(ngihalf=='x' && n1)
f0*=2;
// add the contribution to the force
f1[idx]=-lpctx_[n1]*f0; // FOR1
f2[idx]=-lpcty_[n2]*f0; // FOR2
f3[idx]=-lpctz_[n3]*f0; // FOR3
}
}
// calculate the total force on the ions
extern "C"
void cuda_calcf_(const int *np, const int *nrow, char *ngihalf, double *poison,
cuDoubleComplex *citpi, double *vca, devptr_t *i2, devptr_t *i3,
devptr_t *lpctx, devptr_t *lpcty, devptr_t *lpctz,
devptr_t *lpctx_, devptr_t *lpcty_, devptr_t *lpctz_,
devptr_t *devptr_f1, devptr_t *devptr_f2, devptr_t *devptr_f3,
devptr_t *devptr_ch, devptr_t *devptr_work, double *force)
{
// grid dimensions
int N = *np;
dim3 block(MAX_THREADS);
dim3 grid((N+block.x-1)/block.x);
// size of shared memory buffer
int ssize = 3*block.x*sizeof(double);
// device pointers
cuDoubleComplex *ch = (cuDoubleComplex *)(*devptr_ch);
double *work = (double *)(*devptr_work);
double *f1 = (double *)(*devptr_f1);
double *f2 = (double *)(*devptr_f2);
double *f3 = (double *)(*devptr_f3);
// calculate force contributions
cucalcf<<<grid,block>>>(N,*nrow,*ngihalf,poison[0],poison[1],poison[2],*citpi,*vca,
(int*)*i2,(int*)*i3,(int*)*lpctx,(int*)*lpcty,(int*)*lpctz,(int*)*lpctx_,
(int*)*lpcty_,(int*)*lpctz_,ch,work,f1,f2,f3);
// calculate total force by summing
// over reciprocal lattice vectors
cureducesum3<<<grid,block,ssize>>>(f1,f2,f3,N);
cureducesum3_1block<<<1,block,ssize>>>(f1,f2,f3,grid.x);
// copy sums from device to host
CUDA_ERROR( cudaMemcpy(&force[0],f1,sizeof(double),cudaMemcpyDeviceToHost),
"Failed to copy from device to host in cuda_calcharfor!");
CUDA_ERROR( cudaMemcpy(&force[1],f2,sizeof(double),cudaMemcpyDeviceToHost),
"Failed to copy from device to host in cuda_calcharfor!");
CUDA_ERROR( cudaMemcpy(&force[2],f3,sizeof(double),cudaMemcpyDeviceToHost),
"Failed to copy from device to host in cuda_calcharfor");
}
/******************************************************/
|
43fabef118eb2f1e98520415bdc7169193e3739f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#define n 1000
#define MAX_GRIDSIZE 1
__device__ float __expf(float x);
__global__ void expMass(float *A,float *x, int arraySize)
{
int index = (blockIdx.y*MAX_GRIDSIZE + blockIdx.x)*blockDim.x + threadIdx.x;
if (index < arraySize)
A[index]=expf(x[index]);
}
int main()
{
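	// Sample exp(x) on the device with expf() and compare against the host
	// exp() values, printing each point and the scaled absolute difference.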
float a = 1, b = 5;
float h = ((10-1)*1.0)/n;
float x[n], S[n],arr[n];
float *A, *dev_x;
x[0] = a;
for (int i = 1; i < n; i++)
{
x[i] = i*h;
arr[i] = exp(x[i]);
}
int size = sizeof(float);
hipMalloc((void**)&A, n*size);
hipMalloc((void**)&dev_x, n*size);
hipMemcpy(dev_x, &x, n*size, hipMemcpyHostToDevice);
expMass << <1, n >> > (A, dev_x,n);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("GPUassert: %s %s %d\n", hipGetErrorString(err));
}
hipMemcpy(S, A, n*size, hipMemcpyDeviceToHost);
for (int i = 1; i < n; i++)
{
printf("x=%f, S=%f, arr=%f, err=%f \n", x[i], S[i], arr[i],abs(S[i] - arr[i]) / n);
}
getchar();
} | 43fabef118eb2f1e98520415bdc7169193e3739f.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#define n 1000
#define MAX_GRIDSIZE 1
__device__ float __expf(float x);
__global__ void expMass(float *A,float *x, int arraySize)
{
int index = (blockIdx.y*MAX_GRIDSIZE + blockIdx.x)*blockDim.x + threadIdx.x;
if (index < arraySize)
A[index]=expf(x[index]);
}
int main()
{
float a = 1, b = 5;
float h = ((10-1)*1.0)/n;
float x[n], S[n],arr[n];
float *A, *dev_x;
x[0] = a;
for (int i = 1; i < n; i++)
{
x[i] = i*h;
arr[i] = exp(x[i]);
}
int size = sizeof(float);
cudaMalloc((void**)&A, n*size);
cudaMalloc((void**)&dev_x, n*size);
cudaMemcpy(dev_x, &x, n*size, cudaMemcpyHostToDevice);
expMass << <1, n >> > (A, dev_x,n);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("GPUassert: %s %s %d\n", cudaGetErrorString(err));
}
cudaMemcpy(S, A, n*size, cudaMemcpyDeviceToHost);
for (int i = 1; i < n; i++)
{
printf("x=%f, S=%f, arr=%f, err=%f \n", x[i], S[i], arr[i],abs(S[i] - arr[i]) / n);
}
getchar();
} |
1b7980db3f0dcb903a2d207dfbaeb9010f97eb92.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <duda/kernels/eye.hpp>
namespace duda
{
namespace detail
{
template <typename T>
__global__ void eye_kernel(T* const data, const int dim)
{
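    // Each thread writes one element of the dim x dim identity matrix,
    // stored column-major: 1 on the diagonal (i == j), 0 elsewhere.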
const int i = blockDim.x * blockIdx.x + threadIdx.x;
const int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < dim && j < dim)
{
const int ix = i + j * dim;
data[ix] = i == j;
}
}
template <typename T>
inline void eye(T* const data, const int dim)
{
const int d = 16;
const int n = (dim + d) / d;
const dim3 blocks(n, n);
const dim3 block_dim(d, d);
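    // 16x16 thread blocks; the grid is sized so that every element of the
    // dim x dim matrix is covered (out-of-range threads exit via the bounds check).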
hipLaunchKernelGGL(( eye_kernel), dim3(blocks), dim3(block_dim), 0, 0, data, dim);
}
} // namespace detail
void eye(int* const data, const int dim)
{
detail::eye(data, dim);
}
void eye(float* const data, const int dim)
{
detail::eye(data, dim);
}
void eye(double* const data, const int dim)
{
detail::eye(data, dim);
}
} // namespace duda
| 1b7980db3f0dcb903a2d207dfbaeb9010f97eb92.cu | #include <duda/kernels/eye.hpp>
namespace duda
{
namespace detail
{
template <typename T>
__global__ void eye_kernel(T* const data, const int dim)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
const int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < dim && j < dim)
{
const int ix = i + j * dim;
data[ix] = i == j;
}
}
template <typename T>
inline void eye(T* const data, const int dim)
{
const int d = 16;
const int n = (dim + d) / d;
const dim3 blocks(n, n);
const dim3 block_dim(d, d);
eye_kernel<<<blocks, block_dim>>>(data, dim);
}
} // namespace detail
void eye(int* const data, const int dim)
{
detail::eye(data, dim);
}
void eye(float* const data, const int dim)
{
detail::eye(data, dim);
}
void eye(double* const data, const int dim)
{
detail::eye(data, dim);
}
} // namespace duda
|
1997d064a0199ab5873671fcb547028868c4993c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* c-ray_task_sgpu.cu
*
* Created on: Mar 24, 2017
* Author: chao
*/
#include "c-ray_task_mgpu.h"
#include "c-ray_kernel_v2.h"
#include "../../../common/helper_err.h"
__device__ global_vars g_vars_d;
__device__ vec3_t lights_d[MAX_LIGHTS];
__device__ vec2_t urand_d[NRAN];
__device__ int irand_d[NRAN];
thread_local int crayMGPU::local_yres;
thread_local int crayMGPU::local_startYresIndex;
void crayMGPU::initImpl(global_vars g_vars,
sphere_array_t obj_array,
uint32_t *pixels,
vec3_t *lights){
if(__localThreadId ==0){
std::cout<<"task: "<<getCurrentTask()->getName()<<" begin init ...\n";
this->g_vars = g_vars;
this->obj_array = obj_array;
this->pixels = pixels;
this->lights = lights;
}
intra_Barrier();
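	// Divide the image rows as evenly as possible across the local threads:
	// the first (yres % numLocalThreads) threads each take one extra row.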
int yresPerThread = g_vars.yres / __numLocalThreads;
if(__localThreadId < g_vars.yres % __numLocalThreads){
local_yres = yresPerThread +1;
local_startYresIndex = __localThreadId *(yresPerThread+1);
}
else{
local_yres = yresPerThread;
local_startYresIndex = __localThreadId*yresPerThread + g_vars.yres % __numLocalThreads;
}
intra_Barrier();
if(__localThreadId ==0){
std::cout<<"task: "<<getCurrentTask()->getName()<<" finish initImpl.\n";
}
}
void crayMGPU::runImpl(double runtime[][4], MemType memtype){
if(__localThreadId == 0){
std::cout<<getCurrentTask()->getName()<<" begin run ..."<<std::endl;
}
Timer timer, timer0;
double totaltime;
int xres = g_vars.xres; //column
int yres = g_vars.yres; //row
GpuData<unsigned int> partial_pixels_d(xres*local_yres);
GpuData<vec3_t> obj_array_pos(g_vars.obj_count);
GpuData<material_t> obj_array_mat(g_vars.obj_count);
GpuData<FTYPE> obj_array_rad(g_vars.obj_count);
obj_array_pos.initH(obj_array.pos);
obj_array_mat.initH(obj_array.mat);
obj_array_rad.initH(obj_array.rad);
vec2_t urand[NRAN];
int irand[NRAN];
for(int i=0; i<NRAN; i++) urand[i].x = (double)rand() / RAND_MAX - 0.5;
for(int i=0; i<NRAN; i++) urand[i].y = (double)rand() / RAND_MAX - 0.5;
for(int i=0; i<NRAN; i++) irand[i] = (int)(NRAN * ((double)rand() / RAND_MAX));
/*
* data in
*/
timer0.start();
timer.start();
obj_array_pos.sync();
obj_array_mat.sync();
obj_array_rad.sync();
checkCudaErr(
hipMemcpyToSymbol(lights_d, lights, sizeof(vec3_t)*MAX_LIGHTS, 0,
hipMemcpyHostToDevice));
checkCudaErr(
hipMemcpyToSymbol(urand_d, urand, sizeof(vec2_t)*NRAN, 0,
hipMemcpyHostToDevice));
checkCudaErr(
hipMemcpyToSymbol(irand_d, irand, sizeof(int)*NRAN, 0,
hipMemcpyHostToDevice));
checkCudaErr(
hipMemcpyToSymbol(g_vars_d, (void*)&g_vars, sizeof(g_vars), 0,
hipMemcpyHostToDevice));
double copyinTime = timer.stop();
/*
* call kernel
*/
size_t stacksize;
hipThreadGetLimit(&stacksize, hipLimitStackSize);
stacksize = 1024*4;
hipThreadSetLimit(hipLimitStackSize, stacksize);
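	// The per-thread stack limit is raised to 4 KiB, presumably to leave room
	// for the recursive shading calls inside the render kernel. The launch uses
	// one GPU thread per pixel of this task's slice of the image
	// (local_yres rows starting at row local_startYresIndex).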
dim3 block(16, 16, 1);
dim3 grid((xres+block.x-1)/block.x, (local_yres+block.y-1)/block.y,1);
timer.start();
hipLaunchKernelGGL(( render_kernel), dim3(grid), dim3(block), 0, __streamId,
partial_pixels_d.getD(true),
obj_array_pos.getD(),
obj_array_mat.getD(),
obj_array_rad.getD(),
local_startYresIndex
);
checkCudaErr(hipGetLastError());
checkCudaErr(hipStreamSynchronize(__streamId));
double kernelTime = timer.stop();
/*
*
*/
timer.start();
partial_pixels_d.sync();
double copyoutTime = timer.stop();
intra_Barrier();
totaltime = timer0.stop();
partial_pixels_d.fetch(pixels+local_startYresIndex*xres);
//runtime[0] = copyinTime + copyoutTime + kernelTime;
runtime[__localThreadId][0] = totaltime;
runtime[__localThreadId][1]= kernelTime;
runtime[__localThreadId][2]= copyinTime;
runtime[__localThreadId][3]= copyoutTime;
if(__localThreadId ==0){
std::cout<<"task: "<<getCurrentTask()->getName()<<" finish runImpl.\n";
}
}
| 1997d064a0199ab5873671fcb547028868c4993c.cu | /*
* c-ray_task_sgpu.cu
*
* Created on: Mar 24, 2017
* Author: chao
*/
#include "c-ray_task_mgpu.h"
#include "c-ray_kernel_v2.h"
#include "../../../common/helper_err.h"
__device__ global_vars g_vars_d;
__device__ vec3_t lights_d[MAX_LIGHTS];
__device__ vec2_t urand_d[NRAN];
__device__ int irand_d[NRAN];
thread_local int crayMGPU::local_yres;
thread_local int crayMGPU::local_startYresIndex;
void crayMGPU::initImpl(global_vars g_vars,
sphere_array_t obj_array,
uint32_t *pixels,
vec3_t *lights){
if(__localThreadId ==0){
std::cout<<"task: "<<getCurrentTask()->getName()<<" begin init ...\n";
this->g_vars = g_vars;
this->obj_array = obj_array;
this->pixels = pixels;
this->lights = lights;
}
intra_Barrier();
int yresPerThread = g_vars.yres / __numLocalThreads;
if(__localThreadId < g_vars.yres % __numLocalThreads){
local_yres = yresPerThread +1;
local_startYresIndex = __localThreadId *(yresPerThread+1);
}
else{
local_yres = yresPerThread;
local_startYresIndex = __localThreadId*yresPerThread + g_vars.yres % __numLocalThreads;
}
intra_Barrier();
if(__localThreadId ==0){
std::cout<<"task: "<<getCurrentTask()->getName()<<" finish initImpl.\n";
}
}
void crayMGPU::runImpl(double runtime[][4], MemType memtype){
if(__localThreadId == 0){
std::cout<<getCurrentTask()->getName()<<" begin run ..."<<std::endl;
}
Timer timer, timer0;
double totaltime;
int xres = g_vars.xres; //column
int yres = g_vars.yres; //row
GpuData<unsigned int> partial_pixels_d(xres*local_yres);
GpuData<vec3_t> obj_array_pos(g_vars.obj_count);
GpuData<material_t> obj_array_mat(g_vars.obj_count);
GpuData<FTYPE> obj_array_rad(g_vars.obj_count);
obj_array_pos.initH(obj_array.pos);
obj_array_mat.initH(obj_array.mat);
obj_array_rad.initH(obj_array.rad);
vec2_t urand[NRAN];
int irand[NRAN];
for(int i=0; i<NRAN; i++) urand[i].x = (double)rand() / RAND_MAX - 0.5;
for(int i=0; i<NRAN; i++) urand[i].y = (double)rand() / RAND_MAX - 0.5;
for(int i=0; i<NRAN; i++) irand[i] = (int)(NRAN * ((double)rand() / RAND_MAX));
/*
* data in
*/
timer0.start();
timer.start();
obj_array_pos.sync();
obj_array_mat.sync();
obj_array_rad.sync();
checkCudaErr(
cudaMemcpyToSymbol(lights_d, lights, sizeof(vec3_t)*MAX_LIGHTS, 0,
cudaMemcpyHostToDevice));
checkCudaErr(
cudaMemcpyToSymbol(urand_d, urand, sizeof(vec2_t)*NRAN, 0,
cudaMemcpyHostToDevice));
checkCudaErr(
cudaMemcpyToSymbol(irand_d, irand, sizeof(int)*NRAN, 0,
cudaMemcpyHostToDevice));
checkCudaErr(
cudaMemcpyToSymbol(g_vars_d, (void*)&g_vars, sizeof(g_vars), 0,
cudaMemcpyHostToDevice));
double copyinTime = timer.stop();
/*
* call kernel
*/
size_t stacksize;
cudaThreadGetLimit(&stacksize, cudaLimitStackSize);
stacksize = 1024*4;
cudaThreadSetLimit(cudaLimitStackSize, stacksize);
dim3 block(16, 16, 1);
dim3 grid((xres+block.x-1)/block.x, (local_yres+block.y-1)/block.y,1);
timer.start();
render_kernel<<<grid, block, 0, __streamId>>>(
partial_pixels_d.getD(true),
obj_array_pos.getD(),
obj_array_mat.getD(),
obj_array_rad.getD(),
local_startYresIndex
);
checkCudaErr(cudaGetLastError());
checkCudaErr(cudaStreamSynchronize(__streamId));
double kernelTime = timer.stop();
/*
*
*/
timer.start();
partial_pixels_d.sync();
double copyoutTime = timer.stop();
intra_Barrier();
totaltime = timer0.stop();
partial_pixels_d.fetch(pixels+local_startYresIndex*xres);
//runtime[0] = copyinTime + copyoutTime + kernelTime;
runtime[__localThreadId][0] = totaltime;
runtime[__localThreadId][1]= kernelTime;
runtime[__localThreadId][2]= copyinTime;
runtime[__localThreadId][3]= copyoutTime;
if(__localThreadId ==0){
std::cout<<"task: "<<getCurrentTask()->getName()<<" finish runImpl.\n";
}
}
|
885f694e2ce46ca37185ac51540a0d163f2fd00e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" {
//
__global__ void histogram(unsigned char *dataIn, int *hist)
{
int threadIndex = threadIdx.x + threadIdx.y * blockDim.x;
int blockIndex = blockIdx.x + blockIdx.y * gridDim.x;
int index = threadIndex + blockIndex * blockDim.x * blockDim.y;
atomicAdd(&hist[dataIn[index]], 1);
}
// Grayscale image histogram (optimized)
__global__ void histogram_optimized(unsigned char *buffer, long size, unsigned int *histo){
__shared__ unsigned int private_histo[256];
	if(threadIdx.x < 256) // initialize the shared histogram
private_histo[threadIdx.x] = 0;
__syncthreads();
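	// The barrier above guarantees the shared histogram is fully zeroed before
	// any thread starts accumulating into it.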
int i = threadIdx.x + blockIdx.x * blockDim.x;
	// stride is the total number of threads in the grid
int stride = blockDim.x * gridDim.x;
while(i < size) {
atomicAdd(&(private_histo[buffer[i]]), 1);
i += stride;
}
	// wait for all threads to finish accumulating
__syncthreads();
if(threadIdx.x < 256){
atomicAdd(&(histo[threadIdx.x]), private_histo[threadIdx.x]);
}
}
} | 885f694e2ce46ca37185ac51540a0d163f2fd00e.cu | extern "C" {
// Grayscale histogram computation
__global__ void histogram(unsigned char *dataIn, int *hist)
{
int threadIndex = threadIdx.x + threadIdx.y * blockDim.x;
int blockIndex = blockIdx.x + blockIdx.y * gridDim.x;
int index = threadIndex + blockIndex * blockDim.x * blockDim.y;
atomicAdd(&hist[dataIn[index]], 1);
}
// Grayscale image histogram (optimized)
__global__ void histogram_optimized(unsigned char *buffer, long size, unsigned int *histo){
__shared__ unsigned int private_histo[256];
	if(threadIdx.x < 256) // initialize the shared histogram
private_histo[threadIdx.x] = 0;
__syncthreads();
int i = threadIdx.x + blockIdx.x * blockDim.x;
	// stride is the total number of threads in the grid
int stride = blockDim.x * gridDim.x;
while(i < size) {
atomicAdd(&(private_histo[buffer[i]]), 1);
i += stride;
}
	// wait for all threads to finish accumulating
__syncthreads();
if(threadIdx.x < 256){
atomicAdd(&(histo[threadIdx.x]), private_histo[threadIdx.x]);
}
}
} |
645f0cbf33a96bea010d195dc26bd0be8092e725.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "KerInOutInterpolateTime.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned npt = 1;
double fxtime = 1;
const float *vel0 = NULL;
hipMalloc(&vel0, XSIZE*YSIZE);
const float *vel1 = NULL;
hipMalloc(&vel1, XSIZE*YSIZE);
float *vel = NULL;
hipMalloc(&vel, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
KerInOutInterpolateTime), dim3(gridBlock),dim3(threadBlock), 0, 0, npt,fxtime,vel0,vel1,vel);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
KerInOutInterpolateTime), dim3(gridBlock),dim3(threadBlock), 0, 0, npt,fxtime,vel0,vel1,vel);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
KerInOutInterpolateTime), dim3(gridBlock),dim3(threadBlock), 0, 0, npt,fxtime,vel0,vel1,vel);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 645f0cbf33a96bea010d195dc26bd0be8092e725.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "KerInOutInterpolateTime.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned npt = 1;
double fxtime = 1;
const float *vel0 = NULL;
cudaMalloc(&vel0, XSIZE*YSIZE);
const float *vel1 = NULL;
cudaMalloc(&vel1, XSIZE*YSIZE);
float *vel = NULL;
cudaMalloc(&vel, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
KerInOutInterpolateTime<<<gridBlock,threadBlock>>>(npt,fxtime,vel0,vel1,vel);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
KerInOutInterpolateTime<<<gridBlock,threadBlock>>>(npt,fxtime,vel0,vel1,vel);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
KerInOutInterpolateTime<<<gridBlock,threadBlock>>>(npt,fxtime,vel0,vel1,vel);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
437f81b721ef8dee253920f05fea24a1e5d8e938.hip | // !!! This is a file automatically generated by hipify!!!
/* kernel.cu */
#include <iostream>
#include <cstdlib>
#include <cassert>
#include <algorithm>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
template <int BlockSize>
__global__ void MultiplyMatrix(
float* matC, float* matA, float* matB,
int colA, int colB, int colC)
{
    /* Each thread block computes one BlockSize x BlockSize tile of matrix C */
    /* bx, by identify which tile of C this block is responsible for */
    int bx = blockIdx.x;
    int by = blockIdx.y;
    /* tx, ty identify the element of that tile computed by this thread */
int tx = threadIdx.x;
int ty = threadIdx.y;
    /* Index of the first sub-matrix of A processed by this block; */
    /* beginA corresponds to the element A(BlockSize * by, 0) */
    int beginA = colA * (BlockSize * by);
    /* Index of the last sub-matrix of A processed by this block; */
    /* endA corresponds to the element A(BlockSize * by, colA - 1) */
    int endA = beginA + colA - 1;
    /* Step used to advance through the sub-matrices of A */
    int stepA = BlockSize;
    /* Index of the first sub-matrix of B processed by this block; */
    /* beginB corresponds to the element B(0, BlockSize * bx) */
    int beginB = (BlockSize * bx);
    /* Index of the last sub-matrix of B processed by this block; */
    /* endB would correspond to the element B(rowB - 1, BlockSize * bx) */
    /* int endB = colB * (rowB - 1) + (BlockSize * bx); */
    /* Step used to advance through the sub-matrices of B */
    int stepB = colB * BlockSize;
    /* Accumulator for the element of C computed by this thread */
    float subC = 0.0f;
    /* Loop over all sub-matrix pairs of A and B needed for this tile of C */
    for (int a = beginA, b = beginB; a <= endA; a += stepA, b += stepB) {
        /* a and b index the current sub-matrices of A and B; advancing them by
         * stepA and stepB walks along a block-row of A and a block-column of B */
        /* Shared memory holding the current sub-matrices of A and B */
        __shared__ float subMatA[BlockSize][BlockSize];
        __shared__ float subMatB[BlockSize][BlockSize];
        /* Each thread loads one element of each sub-matrix from global memory
         * into shared memory */
        subMatA[ty][tx] = matA[a + colA * ty + tx];
        subMatB[ty][tx] = matB[b + colB * ty + tx];
        /* Wait until every thread has finished loading */
        __syncthreads();
#pragma unroll
        for (int k = 0; k < BlockSize; ++k)
            subC += subMatA[ty][k] * subMatB[k][tx];
        /* Wait until all threads are done with the current sub-matrices before
         * the next pair is loaded */
        __syncthreads();
    }
    /* Write the computed element of C back to global memory */
int c = colC * (BlockSize * by) + (BlockSize * bx);
matC[c + colC * ty + tx] = subC;
}
int main(int argc, char** argv)
{
const unsigned int BlockSize = 32;
    /* Problem size and the constant value used to fill matrix B */
const float valueB = 0.01f;
const unsigned int rowA = 960;
const unsigned int colA = 480;
const unsigned int rowB = 480;
const unsigned int colB = 960;
assert(colA == rowB);
assert(rowA % BlockSize == 0);
assert(colA % BlockSize == 0);
assert(rowB % BlockSize == 0);
assert(colB % BlockSize == 0);
unsigned int rowC = rowA;
unsigned int colC = colB;
unsigned int memSizeA = sizeof(float) * rowA * colA;
unsigned int memSizeB = sizeof(float) * rowB * colB;
unsigned int memSizeC = sizeof(float) * rowC * colC;
std::cout << "matrix multiplication: "
<< "A(" << rowA << ", " << colA << ") * "
<< "B(" << rowB << ", " << colB << ")\n";
    /* Declare all host and device pointers up front so the error-handling
     * gotos below do not jump over any initialization */
    float* hostMatA = nullptr;
    float* hostMatB = nullptr;
    float* hostMatC = nullptr;
    float* deviceMatA = nullptr;
    float* deviceMatB = nullptr;
    float* deviceMatC = nullptr;
    hipError_t cudaErr = hipError_t::hipSuccess;
    /* Allocate host matrices */
    hostMatA = new (std::nothrow) float[rowA * colA];
    if (hostMatA == nullptr) {
        std::cerr << "failed to allocate sufficient memory for matrix A\n";
        goto Cleanup;
    }
    hostMatB = new (std::nothrow) float[rowB * colB];
    if (hostMatB == nullptr) {
        std::cerr << "failed to allocate sufficient memory for matrix B\n";
        goto Cleanup;
    }
    hostMatC = new (std::nothrow) float[rowC * colC];
    if (hostMatC == nullptr) {
        std::cerr << "failed to allocate sufficient memory for matrix C\n";
        goto Cleanup;
    }
    /* Initialize matrices A and B on the host */
std::fill(hostMatA, hostMatA + rowA * colA, 1.0f);
std::fill(hostMatB, hostMatB + rowB * colB, valueB);
hipError_t cudaErr = hipError_t::hipSuccess;
/* Allocate device memory for the matrices */
float* deviceMatA = nullptr;
cudaErr = ::hipMalloc(&deviceMatA, memSizeA);
if (cudaErr != hipError_t::hipSuccess) {
std::cerr << "failed to allocate device matrix A: "
<< ::hipGetErrorString(cudaErr) << '\n';
goto Cleanup;
}
float* deviceMatB = nullptr;
cudaErr = ::hipMalloc(&deviceMatB, memSizeB);
if (cudaErr != hipError_t::hipSuccess) {
std::cerr << "failed to allocate device matrix B: "
<< ::hipGetErrorString(cudaErr) << '\n';
goto Cleanup;
}
float* deviceMatC = nullptr;
cudaErr = ::hipMalloc(&deviceMatC, memSizeC);
if (cudaErr != hipError_t::hipSuccess) {
std::cerr << "failed to allocate device matrix C: "
<< ::hipGetErrorString(cudaErr) << '\n';
goto Cleanup;
}
/* Copy matrices A and B from host to device */
cudaErr = ::hipMemcpy(deviceMatA, hostMatA, memSizeA, hipMemcpyHostToDevice);
if (cudaErr != hipError_t::hipSuccess) {
std::cerr << "failed to copy matrix A from host to device: "
<< ::hipGetErrorString(cudaErr) << '\n';
goto Cleanup;
}
cudaErr = ::hipMemcpy(deviceMatB, hostMatB, memSizeB, hipMemcpyHostToDevice);
if (cudaErr != hipError_t::hipSuccess) {
std::cerr << "failed to copy matrix B from host to device: "
<< ::hipGetErrorString(cudaErr) << '\n';
goto Cleanup;
}
/* Run the matrix multiplication on the device */
dim3 dimBlock { BlockSize, BlockSize, 1 };
dim3 dimGrid { colC / dimBlock.x, rowC / dimBlock.y, 1 };
hipLaunchKernelGGL(( MultiplyMatrix<BlockSize>), dim3(dimGrid), dim3(dimBlock), 0, 0,
deviceMatC, deviceMatA, deviceMatB, colA, colB, colC);
cudaErr = ::hipGetLastError();
if (cudaErr != hipError_t::hipSuccess) {
std::cerr << "failed to launch MultiplyMatrix kernel: "
<< ::hipGetErrorString(cudaErr) << '\n';
goto Cleanup;
}
/* Wait for the computation to finish */
cudaErr = ::hipDeviceSynchronize();
if (cudaErr != hipError_t::hipSuccess) {
std::cerr << "hipDeviceSynchronize() failed: "
<< ::hipGetErrorString(cudaErr) << '\n';
goto Cleanup;
}
/* Copy the result from device to host */
cudaErr = ::hipMemcpy(hostMatC, deviceMatC, memSizeC, hipMemcpyDeviceToHost);
if (cudaErr != hipError_t::hipSuccess) {
std::cerr << "failed to copy matrix C from device to host: "
<< ::hipGetErrorString(cudaErr) << '\n';
goto Cleanup;
}
/* Verify the result */
for (unsigned int i = 0; i < rowC * colC; ++i) {
/* Compute the relative error */
double absErr = ::fabs(hostMatC[i] - (colA * valueB));
double absValue = ::fabs(hostMatC[i]);
double dotLength = static_cast<double>(rowA);
double relErr = absErr / absValue / dotLength;
if (relErr > 1e-6) {
std::cerr << "result verification failed at element ("
<< (i / colC) << ", " << (i % colC) << ")\n";
goto Cleanup;
}
}
std::cout << "matrix multiplication succeeded\n";
Cleanup:
/* Free the device matrices */
if (deviceMatA != nullptr) {
cudaErr = ::hipFree(deviceMatA);
if (cudaErr != hipError_t::hipSuccess)
std::cerr << "failed to free device matrix A: "
<< ::hipGetErrorString(cudaErr) << '\n';
}
if (deviceMatB != nullptr) {
cudaErr = ::hipFree(deviceMatB);
if (cudaErr != hipError_t::hipSuccess)
std::cerr << "failed to free device matrix A: "
<< ::hipGetErrorString(cudaErr) << '\n';
}
if (deviceMatC != nullptr) {
cudaErr = ::hipFree(deviceMatC);
if (cudaErr != hipError_t::hipSuccess)
std::cerr << "failed to free device matrix A: "
<< ::hipGetErrorString(cudaErr) << '\n';
}
/* Free the host matrices */
if (hostMatA != nullptr)
delete[] hostMatA;
if (hostMatB != nullptr)
delete[] hostMatB;
if (hostMatC != nullptr)
delete[] hostMatC;
return EXIT_SUCCESS;
}
| 437f81b721ef8dee253920f05fea24a1e5d8e938.cu |
/* kernel.cu */
#include <iostream>
#include <cstdlib>
#include <cassert>
#include <algorithm>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
template <int BlockSize>
__global__ void MultiplyMatrix(
float* matC, float* matA, float* matB,
int colA, int colB, int colC)
{
/* Each thread block computes one tile (sub-matrix) of C */
/* Each thread computes one element of that tile of C */
/* Index of this block's tile of C */
int bx = blockIdx.x;
int by = blockIdx.y;
/* Index of this thread's element within the tile of C */
int tx = threadIdx.x;
int ty = threadIdx.y;
/* Index of the first tile of A for this block */
/* beginA is element (BlockSize * by, 0) of A */
int beginA = colA * (BlockSize * by);
/* Largest index of a tile of A for this block */
/* endA is element (BlockSize * by, colA - 1) of A */
int endA = beginA + colA - 1;
/* Step between consecutive tiles of A */
/* stepA moves one tile width to the right */
int stepA = BlockSize;
/* Index of the first tile of B for this block */
/* beginB is element (0, BlockSize * bx) of B */
int beginB = (BlockSize * bx);
/* Largest index of a tile of B for this block */
/* endB would be element (rowB - 1, BlockSize * bx) of B */
/* int endB = colB * (rowB - 1) + (BlockSize * bx); */
/* Step between consecutive tiles of B */
/* stepB moves one tile height downwards */
int stepB = colB * BlockSize;
/* Accumulator for this thread's element of the C tile */
float subC = 0.0f;
/* Compute this block's tile of C */
for (int a = beginA, b = beginB; a <= endA; a += stepA, b += stepB) {
/* Accumulate the C tile by multiplying successive tiles of A and B;
 * a and b index the start of the current tiles of A and B,
 * and advancing them by stepA and stepB moves on to
 * the next pair of tiles of A and B */
/* Tiles shared by all threads of the block */
__shared__ float subMatA[BlockSize][BlockSize];
__shared__ float subMatB[BlockSize][BlockSize];
/* Each thread copies one element of each tile from
 * device memory into shared memory */
subMatA[ty][tx] = matA[a + colA * ty + tx];
subMatB[ty][tx] = matB[b + colB * ty + tx];
/* Synchronize so both tiles are fully loaded into shared memory */
__syncthreads();
#pragma unroll
for (int k = 0; k < BlockSize; ++k)
subC += subMatA[ty][k] * subMatB[k][tx];
/* Synchronize before loading the next tiles of A and B */
__syncthreads();
}
/* Each thread writes its element of C */
int c = colC * (BlockSize * by) + (BlockSize * bx);
matC[c + colC * ty + tx] = subC;
}
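/* Resource sketch for the kernel above: every block keeps two
 * BlockSize x BlockSize float tiles in shared memory, i.e.
 * 2 * 32 * 32 * 4 bytes = 8 KiB per block for BlockSize = 32, plus one
 * float accumulator (subC) in a register per thread. */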
int main(int argc, char** argv)
{
const unsigned int BlockSize = 32;
/* Matrix row and column sizes */
const float valueB = 0.01f;
const unsigned int rowA = 960;
const unsigned int colA = 480;
const unsigned int rowB = 480;
const unsigned int colB = 960;
assert(colA == rowB);
assert(rowA % BlockSize == 0);
assert(colA % BlockSize == 0);
assert(rowB % BlockSize == 0);
assert(colB % BlockSize == 0);
unsigned int rowC = rowA;
unsigned int colC = colB;
unsigned int memSizeA = sizeof(float) * rowA * colA;
unsigned int memSizeB = sizeof(float) * rowB * colB;
unsigned int memSizeC = sizeof(float) * rowC * colC;
std::cout << "matrix multiplication: "
<< "A(" << rowA << ", " << colA << ") * "
<< "B(" << rowB << ", " << colB << ")\n";
/* Allocate host memory for the matrices */
float* hostMatA = new (std::nothrow) float[rowA * colA];
if (hostMatA == nullptr) {
std::cerr << "failed to allocate sufficient memory for matrix A\n";
goto Cleanup;
}
float* hostMatB = new (std::nothrow) float[rowB * colB];
if (hostMatB == nullptr) {
std::cerr << "failed to allocate sufficient memory for matrix B\n";
goto Cleanup;
}
float* hostMatC = new (std::nothrow) float[rowC * colC];
if (hostMatC == nullptr) {
std::cerr << "failed to allocate sufficient memory for matrix C\n";
goto Cleanup;
}
/* Initialize matrices A and B */
std::fill(hostMatA, hostMatA + rowA * colA, 1.0f);
std::fill(hostMatB, hostMatB + rowB * colB, valueB);
cudaError_t cudaErr = cudaError::cudaSuccess;
/* Allocate device memory for the matrices */
float* deviceMatA = nullptr;
cudaErr = ::cudaMalloc(&deviceMatA, memSizeA);
if (cudaErr != cudaError::cudaSuccess) {
std::cerr << "failed to allocate device matrix A: "
<< ::cudaGetErrorString(cudaErr) << '\n';
goto Cleanup;
}
float* deviceMatB = nullptr;
cudaErr = ::cudaMalloc(&deviceMatB, memSizeB);
if (cudaErr != cudaError::cudaSuccess) {
std::cerr << "failed to allocate device matrix B: "
<< ::cudaGetErrorString(cudaErr) << '\n';
goto Cleanup;
}
float* deviceMatC = nullptr;
cudaErr = ::cudaMalloc(&deviceMatC, memSizeC);
if (cudaErr != cudaError::cudaSuccess) {
std::cerr << "failed to allocate device matrix C: "
<< ::cudaGetErrorString(cudaErr) << '\n';
goto Cleanup;
}
/* Copy matrices A and B from host to device */
cudaErr = ::cudaMemcpy(deviceMatA, hostMatA, memSizeA, cudaMemcpyHostToDevice);
if (cudaErr != cudaError::cudaSuccess) {
std::cerr << "failed to copy matrix A from host to device: "
<< ::cudaGetErrorString(cudaErr) << '\n';
goto Cleanup;
}
cudaErr = ::cudaMemcpy(deviceMatB, hostMatB, memSizeB, cudaMemcpyHostToDevice);
if (cudaErr != cudaError::cudaSuccess) {
std::cerr << "failed to copy matrix B from host to device: "
<< ::cudaGetErrorString(cudaErr) << '\n';
goto Cleanup;
}
/* Run the matrix multiplication on the device */
dim3 dimBlock { BlockSize, BlockSize, 1 };
dim3 dimGrid { colC / dimBlock.x, rowC / dimBlock.y, 1 };
MultiplyMatrix<BlockSize><<<dimGrid, dimBlock>>>(
deviceMatC, deviceMatA, deviceMatB, colA, colB, colC);
cudaErr = ::cudaGetLastError();
if (cudaErr != cudaError::cudaSuccess) {
std::cerr << "failed to launch MultiplyMatrix kernel: "
<< ::cudaGetErrorString(cudaErr) << '\n';
goto Cleanup;
}
/* Wait for the computation to finish */
cudaErr = ::cudaDeviceSynchronize();
if (cudaErr != cudaError::cudaSuccess) {
std::cerr << "cudaDeviceSynchronize() failed: "
<< ::cudaGetErrorString(cudaErr) << '\n';
goto Cleanup;
}
/* Copy the result from device to host */
cudaErr = ::cudaMemcpy(hostMatC, deviceMatC, memSizeC, cudaMemcpyDeviceToHost);
if (cudaErr != cudaError::cudaSuccess) {
std::cerr << "failed to copy matrix C from device to host: "
<< ::cudaGetErrorString(cudaErr) << '\n';
goto Cleanup;
}
/* Verify the result */
for (unsigned int i = 0; i < rowC * colC; ++i) {
/* Compute the relative error */
double absErr = std::fabs(hostMatC[i] - (colA * valueB));
double absValue = std::fabs(hostMatC[i]);
double dotLength = static_cast<double>(rowA);
double relErr = absErr / absValue / dotLength;
if (relErr > 1e-6) {
std::cerr << "result verification failed at element ("
<< (i / colC) << ", " << (i % colC) << ")\n";
goto Cleanup;
}
}
std::cout << "matrix multiplication succeeded\n";
Cleanup:
/* Free the device matrices */
if (deviceMatA != nullptr) {
cudaErr = ::cudaFree(deviceMatA);
if (cudaErr != cudaError::cudaSuccess)
std::cerr << "failed to free device matrix A: "
<< ::cudaGetErrorString(cudaErr) << '\n';
}
if (deviceMatB != nullptr) {
cudaErr = ::cudaFree(deviceMatB);
if (cudaErr != cudaError::cudaSuccess)
std::cerr << "failed to free device matrix A: "
<< ::cudaGetErrorString(cudaErr) << '\n';
}
if (deviceMatC != nullptr) {
cudaErr = ::cudaFree(deviceMatC);
if (cudaErr != cudaError::cudaSuccess)
std::cerr << "failed to free device matrix A: "
<< ::cudaGetErrorString(cudaErr) << '\n';
}
/* Free the host matrices */
if (hostMatA != nullptr)
delete[] hostMatA;
if (hostMatB != nullptr)
delete[] hostMatB;
if (hostMatC != nullptr)
delete[] hostMatC;
return EXIT_SUCCESS;
}
|
86c21893dfc69bc802231f33e63b2bd76eadd329.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/blas_l2/sgemm_mgpu.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ahmad Abdelfattah
* @date 2018-11-14
**/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <rocblas.h>
#include <stdio.h>
#include "gemm_aux.cuh"
#include "operators.h"
#define SGEMM_MAX_TILE (8192)
extern "C"
void kblas_sgemm_mgpu( char transa, char transb, long m, long n, long k,
float alpha, const float* A, long lda,
const float* B, long ldb,
float beta, float* C, long ldc,
long ngpus, long* gpu_id,
long *tile)
{
hipblasStatus_t se;
int current_gpu;
hipGetDevice(¤t_gpu);
long tile_size = (*tile);
if(tile_size == -1)
{
tile_size = recommend_tile(m, n, k, ngpus, SGEMM_MAX_TILE);
(*tile) = tile_size;
}
// set to 1 to print info
long pflag = 0;
// compute #waves of full stripes
long stripes = (m + tile_size-1)/tile_size; //(m / tile_size) + (m%tile_size != 0);
long full_waves = stripes / ngpus;
long remaining_stripes = stripes % ngpus;
// compute the memory space required per gpu
// first, wrap up k to be multiple of tile_size
long k__ = ( (k + tile_size-1)/tile_size ) * tile_size;
long width = tile_size;
long height = k__; // height of a h-stripe of A or v-stripe of B
height += 2 * tile_size; // 2 extra tiles for multiplication
height += 2 * tile_size; // 2 output tiles
height = ( (height+31)/32 ) * 32; // for coalesced memory access
long mem_space = height * width;
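// Worked example of the sizing above: with tile_size = 2048 and k = 8192,
// k__ = 8192 and height = 8192 + 4 * 2048 = 16384 (already a multiple of 32),
// so mem_space = 16384 * 2048 floats, i.e. a 128 MiB workspace per GPU.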
// gpu pointers/workspace
float* gpu_ws[MAX_NGPUS];
float* a[MAX_NGPUS];
float* b[MAX_NGPUS][2];
float* c[MAX_NGPUS][2];
float* a_[MAX_NGPUS];
float* b_[MAX_NGPUS][2];
float* c_[MAX_NGPUS][2];
// streams
hipStream_t stream[MAX_NGPUS][4];
// events
long nevents = (max(n, k)+tile_size-1) / tile_size;
hipEvent_t _ain_[MAX_NGPUS][MAX_EVENTS];
hipEvent_t _bin_[MAX_NGPUS][MAX_EVENTS];
hipEvent_t _afree_[MAX_NGPUS][MAX_EVENTS];
hipEvent_t _bfree_[MAX_NGPUS][MAX_EVENTS];
hipEvent_t _cin_[MAX_NGPUS][MAX_EVENTS];
hipEvent_t _cout_[MAX_NGPUS][MAX_EVENTS];
hipEvent_t _compute_[MAX_NGPUS][MAX_EVENTS];
// allocate gpu memory
{
if(pflag)printf("memory allocation\n");
hipError_t e;
for(long i = 0; i < ngpus; i++)
{
hipSetDevice(gpu_id[i]);
e = hipMalloc((void**)&gpu_ws[i], mem_space * sizeof(float));
if(e != hipSuccess)
{
printf("ERROR: failed to allocate memory on gpu %ld \n", i);
for(long j = 0; j <= i; j++) { if(gpu_ws[i]) hipFree(gpu_ws[i]); }
exit(1);
}
}
}
// aux host pointers into A, B and C
float *A_[MAX_NGPUS], *B_[MAX_NGPUS], *C_[MAX_NGPUS];
// Adjust pointers
{
if(pflag)printf("adjust pointers\n");
// host
for(long i = 0; i < ngpus; i++)
{
A_[i] = (float*)A;
B_[i] = (float*)B;
C_[i] = (float*)C;
}
// device
for(long i = 0; i < ngpus; i++)
{
a[i] = gpu_ws[i];
b[i][0] = a[i] + tile_size * k__;
b[i][1] = b[i][0] + tile_size * tile_size;
c[i][0] = b[i][1] + tile_size * tile_size;
c[i][1] = c[i][0] + tile_size * tile_size;
}
}
// create streams and events
{
if(pflag)printf("stream create\n");
for(long i = 0; i < ngpus; i++)
{
hipSetDevice(gpu_id[i]);
hipStreamCreate(&stream[i][0]); // compute
hipStreamCreate(&stream[i][1]); // copy a in and c out
hipStreamCreate(&stream[i][2]); // copy b in
hipStreamCreate(&stream[i][3]); // copy c in
for(long j = 0; j < nevents; j++)
{
hipEventCreate(&_ain_[i][j], hipEventDisableTiming);
hipEventCreate(&_bin_[i][j], hipEventDisableTiming);
hipEventCreate(&_afree_[i][j], hipEventDisableTiming);
hipEventCreate(&_bfree_[i][j], hipEventDisableTiming);
hipEventCreate(&_compute_[i][j], hipEventDisableTiming);
hipEventCreate(&_cin_[i][j], hipEventDisableTiming);
hipEventCreate(&_cout_[i][j], hipEventDisableTiming);
}
}
}
// set stream for the gemm calls
for(long id = 0; id < ngpus; id++)
{
if(pflag)printf("set kernel stream\n");
hipSetDevice(gpu_id[id]);
hipblasSetKernelStream(stream[id][0]);
}
// compute stepping in A and B
long step_a, step_b;
if(transa == 'n' || transa == 'N') step_a = tile_size * lda;
else step_a = tile_size;
if(transb == 'n' || transb == 'N') step_b = tile_size;
else step_b = tile_size * ldb;
// selector to switch between 2 gpu buffers
long bselect[MAX_NGPUS] = {0};
long cselect[MAX_NGPUS] = {0};
// variables that store the actual tile sizes from A, B, and C for every GPU
long ra[MAX_NGPUS] = {0};
long ca[MAX_NGPUS] = {0};
long rb[MAX_NGPUS] = {0};
long cb[MAX_NGPUS] = {0};
long rc[MAX_NGPUS] = {0};
long cc[MAX_NGPUS] = {0};
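// bselect/cselect implement double buffering: while the GEMM on stream[id][0]
// consumes the current b (or c) tile, the next tile is prefetched into the
// other buffer on a copy stream, and the _bfree_/_cout_ events below prevent
// a buffer from being overwritten while it is still in use.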
//main loop
{
if(pflag)printf("main loop\n");
long total_iterations = full_waves + (remaining_stripes!=0);
long ngpus_active;
long n_ = (n + tile_size-1) / tile_size;
long k_ = (k + tile_size-1) / tile_size;
// i - loop over full waves (m)
for(long i = 0; i < total_iterations; i++)
{
ngpus_active = ngpus;
if(i == total_iterations-1){if(remaining_stripes != 0) ngpus_active = remaining_stripes;}
// advance A_
if(pflag)printf("i = %ld, advance A_\n", i);
if(transa == 'n' || transa == 'N') for(long id = 0; id < ngpus_active; id++) {A_[id] = (float*)A + (i *ngpus + id) * tile_size;}
else for(long id = 0; id < ngpus_active; id++) { A_[id] = (float*)A + (i * ngpus + id) * tile_size * lda; }
// compute #rows of current tiles in A and C
for(long id = 0; id < ngpus_active; id++) rc[id] = min(m - (i*ngpus+id)*tile_size , tile_size);
if(transa == 'n' || transa == 'N')
for(long id = 0; id < ngpus_active; id++) ra[id] = min(m - (i*ngpus+id)*tile_size , tile_size);
else
for(long id = 0; id < ngpus_active; id++) ca[id] = min(m - (i*ngpus+id)*tile_size , tile_size);
// j - loop over (n) -
for(long j = 0; j < n_ ; j++)
{
if(pflag)printf("\t j = %ld, advance B_ and C_\n", j);
// compute #cols in current tiles in B and C
for(long id = 0; id < ngpus_active; id++) cc[id] = min(n - j*tile_size , tile_size);
if(transb == 'n' || transb == 'N')
for(long id = 0; id < ngpus_active; id++) cb[id] = min(n - j*tile_size , tile_size);
else
for(long id = 0; id < ngpus_active; id++) rb[id] = min(n - j*tile_size , tile_size);
// Advance B_
if(transb == 'n' || transb == 'N') for(long id = 0; id < ngpus_active; id++) {B_[id] = (float*)B + j * tile_size * ldb;}
else for(long id = 0; id < ngpus_active; id++) {B_[id] = (float*)B + j * tile_size;}
// Advance C_
for(long id = 0; id < ngpus_active; id++)
{
//C_[id] = (float*)C + ( (i *ngpus + id) * tile_size ) + ( j * tile_size * ldc);
C_[id] = (float*)C;
//if(transa == 'n' || transa == 'N')
C_[id] += (i *ngpus + id) * tile_size;
//else C_[id] += (i * ngpus + id) * tile_size * ldc;
//if(transb == 'n' || transb == 'N')
C_[id] += j * tile_size * ldc;
//else C_[id] += j * tile_size;
}
// copy device pointers
for(long id = 0; id < ngpus_active; id++)
{
a_[id] = a[id];
b_[id][0] = b[id][0];
b_[id][1] = b[id][1];
c_[id][0] = c[id][0];
c_[id][1] = c[id][1];
}
// if starting to compute new row of tiles in C
// copy the first tile of C in the row into devices
if(j == 0)
{
for(long id = 0; id < ngpus_active; id++)
{
hipSetDevice(gpu_id[id]);
hipStreamWaitEvent(stream[id][3], _cout_[id][cselect[id]], 0);
se = hipblasSetMatrixAsync(rc[id], cc[id], sizeof(float), C_[id], ldc, c[id][cselect[id]], tile_size, stream[id][3]);
process_error(se, "copy cin new row of tiles");
hipEventRecord(_cin_[id][cselect[id]], stream[id][3]);
}
}
if(pflag)printf("\t j = %ld, copy a, b tile in\n", j);
// prepare a first input offload
for(long id = 0; id < ngpus_active; id ++)
{
// as if p = 0 (first iteration in the inner-most loop)
if(transa == 'n' || transa == 'N') ca[id] = min(k - 0*tile_size , tile_size);
else ra[id] = min(k - 0*tile_size , tile_size);
if(transb == 'n' || transb == 'N') rb[id] = min(k - 0*tile_size , tile_size);
else cb[id] = min(k - 0*tile_size , tile_size);
hipSetDevice(gpu_id[id]);
if(j == 0)
{
hipStreamWaitEvent(stream[id][1], _afree_[id][0], 0);
se = hipblasSetMatrixAsync(ra[id], ca[id], sizeof(float), A_[id], lda, a_[id], tile_size, stream[id][1]);
char ss[100];
sprintf(ss, " i =%ld, j = %ld copy ain new row of tiles: [%ld]x[%ld]", i, j, ra[id], ca[id]);
process_error(se, ss);
hipEventRecord(_ain_[id][0], stream[id][1]);
}
hipStreamWaitEvent(stream[id][2], _bfree_[id][bselect[id]], 0);
se = hipblasSetMatrixAsync(rb[id], cb[id], sizeof(float), B_[id], ldb, b_[id][bselect[id]], tile_size, stream[id][2]);
process_error(se, "copy bin new row of tiles");
hipEventRecord(_bin_[id][bselect[id]], stream[id][2]);
}
// init b selector
//for(long id = 0; id < ngpus; id++) bselect[id] = 0;
// p - loop over k
long p = 0;
for(p = 0; p < k_; p++)
{
float beta_;
if(p == 0)beta_ = beta; else beta_ = 1;
for(long id = 0; id < ngpus_active; id++)
{
hipSetDevice(gpu_id[id]);
if(pflag)printf("\t\t p = %ld, wait for communication\n", p);
if(transa == 'n' || transa == 'N') ca[id] = min(k - p*tile_size , tile_size);
else ra[id] = min(k - p*tile_size , tile_size);
if(transb == 'n' || transb == 'N') rb[id] = min(k - p*tile_size , tile_size);
else cb[id] = min(k - p*tile_size , tile_size);
// wait for communication
//if(p == 0)hipStreamSynchronize(stream[id][3]);
//if(j == 0)hipStreamSynchronize(stream[id][1]);
//hipStreamSynchronize(stream[id][2]);
if(p == 0) hipStreamWaitEvent(stream[id][0], _cin_[id][cselect[id]], 0);
if(j == 0) hipStreamWaitEvent(stream[id][0], _ain_[id][p], 0);
hipStreamWaitEvent(stream[id][0], _bin_[id][bselect[id]], 0);
if(pflag)printf("\t\t p = %ld, gpu = %ld, invoke sgemm\n", p, id);
if(pflag)printf("\t\t ------------------------------\n");
if(pflag)printf("\t\t cselect[%ld] = %ld \n", id, cselect[id]);
long msmall = rc[id];
long nsmall = cc[id];
long ksmall;
if(transa == 'n' || transa == 'N') ksmall = ca[id];
else ksmall = ra[id];
//{
// printf("\n");
// printf("gpu %ld: [%ld][%ld] x [%ld][%ld] = [%ld][%ld]\n", id, msmall, ksmall, ksmall, nsmall, msmall, nsmall);
// //print a
// printf("A\n--------\n");
// myprint_matrix(transa, msmall, ksmall, a_[id], tile_size);
// //print b
// printf("B\n--------\n");
// myprint_matrix(transb, ksmall, nsmall, b_[id][bselect[id]], tile_size);
//}
// invoke sgemm
hipblasSgemm(transa, transb,
msmall, nsmall, ksmall,
alpha, a_[id], tile_size,
b_[id][bselect[id]], tile_size,
beta_, c_[id][cselect[id]], tile_size);
hipEventRecord(_bfree_[id][bselect[id]], stream[id][0]);
if(j == n_-1) hipEventRecord(_afree_[id][p], stream[id][0]);
if(p == k_-1) hipEventRecord(_compute_[id][j], stream[id][0]);
// prepare next input
bselect[id] = 1 - bselect[id];
a_[id] += tile_size * tile_size;
if(p != k_-1)
{
if(pflag)printf("\t\t p = %ld, prepare next input\n", p);
if(j == 0)
{
A_[id] += step_a;
if(transa == 'n' || transa == 'N')ca[id] = min(k - (p+1)*tile_size, tile_size);
else ra[id] = min(k - (p+1)*tile_size, tile_size);
hipStreamWaitEvent(stream[id][1], _afree_[id][p+1], 0);
se = hipblasSetMatrixAsync(ra[id], ca[id], sizeof(float), A_[id], lda, a_[id], tile_size, stream[id][1]);
process_error(se, "prefetch ain");
hipEventRecord(_ain_[id][p+1], stream[id][1]);
if(transa == 'n' || transa == 'N')ca[id] = min(k - (p)*tile_size, tile_size);
else ra[id] = min(k - (p)*tile_size, tile_size);
}
B_[id] += step_b;
if(transb == 'n' || transb == 'N') rb[id] = min(k - (p+1)*tile_size, tile_size);
else cb[id] = min(k - (p+1)*tile_size, tile_size);
hipStreamWaitEvent(stream[id][2], _bfree_[id][bselect[id]], 0);
se = hipblasSetMatrixAsync(rb[id], cb[id], sizeof(float), B_[id], ldb, b_[id][bselect[id]], tile_size, stream[id][2]);
process_error(se, "prefetch bin");
hipEventRecord(_bin_[id][bselect[id]], stream[id][2]);
if(transb == 'n' || transb == 'N') rb[id] = min(k - (p)*tile_size, tile_size);
else cb[id] = min(k - (p)*tile_size, tile_size);
}
if( p == 0)
{
if(j != n_-1)
{
// early copy of the next tile of C
float* Ctmp = C_[id] + tile_size * ldc;
cselect[id] = 1 - cselect[id];
// rc[id] is the same, but we need to compute cc
cc[id] = min(n - (j+1)*tile_size, tile_size);
if(pflag)printf("\t\t cselect[%ld] = %ld \n", id, cselect[id]);
hipStreamWaitEvent(stream[id][3], _cout_[id][cselect[id]], 0);
se = hipblasSetMatrixAsync(rc[id], cc[id], sizeof(float), Ctmp, ldc, c_[id][cselect[id]], tile_size, stream[id][3]);
char ss[100];
sprintf(ss, "gpu[%ld]: prefetch cin [%ld]x[%ld]", id, rc[id], cc[id]);
process_error(se, ss);
hipEventRecord(_cin_[id][cselect[id]], stream[id][3]);
cselect[id] = 1 - cselect[id];
// restore cc
cc[id] = min(n - j*tile_size, tile_size);
}
}
if(pflag)printf("\n");
}
}// p - loop over k
// copy c into cpu
for(long id = 0; id < ngpus_active; id++)
{
if(pflag)printf("i = %ld, j = %ld, gpu = %ld, copy c output\n", i, j, id);
hipSetDevice(gpu_id[id]);
//hipStreamSynchronize(stream[id][0]);
hipStreamWaitEvent(stream[id][3], _compute_[id][j], 0);
se = hipblasGetMatrixAsync(rc[id], cc[id], sizeof(float), c_[id][cselect[id]], tile_size, C_[id], ldc, stream[id][3]);
process_error(se, "read output c");
hipEventRecord(_cout_[id][cselect[id]], stream[id][3]);
cselect[id] = 1 - cselect[id];
}
}// j - loop over (n)
} // i - loop over full waves (m)
}// main compute part
// global sync point
{
if(pflag)printf("global sync\n");
for(long id = 0; id < ngpus; id++)
{
hipSetDevice(gpu_id[id]);
hipDeviceSynchronize();
}
}
// switch cublas streams to the default one
for(long id = 0; id < ngpus; id ++)
{
hipSetDevice(gpu_id[id]);
hipblasSetKernelStream(0);
}
// destroy streams
{
if(pflag)printf("destroy stream\n");
for(long i = 0; i < ngpus; i++)
{
hipSetDevice(gpu_id[i]);
hipStreamDestroy(stream[i][0]);
hipStreamDestroy(stream[i][1]);
hipStreamDestroy(stream[i][2]);
hipStreamDestroy(stream[i][3]);
}
}
// destroy events
{
for(long i = 0; i < ngpus; i++)
{
for(long j = 0; j < nevents; j++)
{
hipEventDestroy(_ain_[i][j]);
hipEventDestroy(_bin_[i][j]);
hipEventDestroy(_afree_[i][j]);
hipEventDestroy(_bfree_[i][j]);
hipEventDestroy(_compute_[i][j]);
hipEventDestroy(_cin_[i][j]);
hipEventDestroy(_cout_[i][j]);
}
}
}
// free resources
{
if(pflag)printf("free resources\n");
for(long i = 0; i < ngpus; i++)
if(gpu_ws[i]) hipFree(gpu_ws[i]);
}
// retrieve current gpu
hipSetDevice(current_gpu);
}
| 86c21893dfc69bc802231f33e63b2bd76eadd329.cu | /**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/blas_l2/sgemm_mgpu.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ahmad Abdelfattah
* @date 2018-11-14
**/
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cublas.h>
#include <stdio.h>
#include "gemm_aux.cuh"
#include "operators.h"
#define SGEMM_MAX_TILE (8192)
extern "C"
void kblas_sgemm_mgpu( char transa, char transb, long m, long n, long k,
float alpha, const float* A, long lda,
const float* B, long ldb,
float beta, float* C, long ldc,
long ngpus, long* gpu_id,
long *tile)
{
cublasStatus_t se;
int current_gpu;
cudaGetDevice(¤t_gpu);
long tile_size = (*tile);
if(tile_size == -1)
{
tile_size = recommend_tile(m, n, k, ngpus, SGEMM_MAX_TILE);
(*tile) = tile_size;
}
// set to 1 to print info
long pflag = 0;
// compute #waves of full stripes
long stripes = (m + tile_size-1)/tile_size; //(m / tile_size) + (m%tile_size != 0);
long full_waves = stripes / ngpus;
long remaining_stripes = stripes % ngpus;
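// For example, m = 10000 rows with tile_size = 2048 and ngpus = 4 gives
// stripes = 5, full_waves = 1 and remaining_stripes = 1: one full round over
// all four GPUs followed by a final round that keeps only one GPU active.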
// compute the memory space required per gpu
// first, wrap up k to be multiple of tile_size
long k__ = ( (k + tile_size-1)/tile_size ) * tile_size;
long width = tile_size;
long height = k__; // height of a h-stripe of A or v-stripe of B
height += 2 * tile_size; // 2 extra tiles for multiplication
height += 2 * tile_size; // 2 output tiles
height = ( (height+31)/32 ) * 32; // for coalesced memory access
long mem_space = height * width;
// gpu pointers/workspace
float* gpu_ws[MAX_NGPUS];
float* a[MAX_NGPUS];
float* b[MAX_NGPUS][2];
float* c[MAX_NGPUS][2];
float* a_[MAX_NGPUS];
float* b_[MAX_NGPUS][2];
float* c_[MAX_NGPUS][2];
// streams
cudaStream_t stream[MAX_NGPUS][4];
// events
long nevents = (max(n, k)+tile_size-1) / tile_size;
cudaEvent_t _ain_[MAX_NGPUS][MAX_EVENTS];
cudaEvent_t _bin_[MAX_NGPUS][MAX_EVENTS];
cudaEvent_t _afree_[MAX_NGPUS][MAX_EVENTS];
cudaEvent_t _bfree_[MAX_NGPUS][MAX_EVENTS];
cudaEvent_t _cin_[MAX_NGPUS][MAX_EVENTS];
cudaEvent_t _cout_[MAX_NGPUS][MAX_EVENTS];
cudaEvent_t _compute_[MAX_NGPUS][MAX_EVENTS];
// allocate gpu memory
{
if(pflag)printf("memory allocation\n");
cudaError_t e;
for(long i = 0; i < ngpus; i++)
{
cudaSetDevice(gpu_id[i]);
e = cudaMalloc((void**)&gpu_ws[i], mem_space * sizeof(float));
if(e != cudaSuccess)
{
printf("ERROR: failed to allocate memory on gpu %ld \n", i);
for(long j = 0; j <= i; j++) { if(gpu_ws[i]) cudaFree(gpu_ws[i]); }
exit(1);
}
}
}
// aux host pointers into A, B and C
float *A_[MAX_NGPUS], *B_[MAX_NGPUS], *C_[MAX_NGPUS];
// Adjust pointers
{
if(pflag)printf("adjust pointers\n");
// host
for(long i = 0; i < ngpus; i++)
{
A_[i] = (float*)A;
B_[i] = (float*)B;
C_[i] = (float*)C;
}
// device
for(long i = 0; i < ngpus; i++)
{
a[i] = gpu_ws[i];
b[i][0] = a[i] + tile_size * k__;
b[i][1] = b[i][0] + tile_size * tile_size;
c[i][0] = b[i][1] + tile_size * tile_size;
c[i][1] = c[i][0] + tile_size * tile_size;
}
}
// create streams and events
{
if(pflag)printf("stream create\n");
for(long i = 0; i < ngpus; i++)
{
cudaSetDevice(gpu_id[i]);
cudaStreamCreate(&stream[i][0]); // compute
cudaStreamCreate(&stream[i][1]); // copy a in and c out
cudaStreamCreate(&stream[i][2]); // copy b in
cudaStreamCreate(&stream[i][3]); // copy c in
for(long j = 0; j < nevents; j++)
{
cudaEventCreate(&_ain_[i][j], cudaEventDisableTiming);
cudaEventCreate(&_bin_[i][j], cudaEventDisableTiming);
cudaEventCreate(&_afree_[i][j], cudaEventDisableTiming);
cudaEventCreate(&_bfree_[i][j], cudaEventDisableTiming);
cudaEventCreate(&_compute_[i][j], cudaEventDisableTiming);
cudaEventCreate(&_cin_[i][j], cudaEventDisableTiming);
cudaEventCreate(&_cout_[i][j], cudaEventDisableTiming);
}
}
}
// set stream for the gemm calls
for(long id = 0; id < ngpus; id++)
{
if(pflag)printf("set kernel stream\n");
cudaSetDevice(gpu_id[id]);
cublasSetKernelStream(stream[id][0]);
}
// compute stepping in A and B
long step_a, step_b;
if(transa == 'n' || transa == 'N') step_a = tile_size * lda;
else step_a = tile_size;
if(transb == 'n' || transb == 'N') step_b = tile_size;
else step_b = tile_size * ldb;
// selector to switch between 2 gpu buffers
long bselect[MAX_NGPUS] = {0};
long cselect[MAX_NGPUS] = {0};
// variables that store the actual tile sizes from A, B, and C for every GPU
long ra[MAX_NGPUS] = {0};
long ca[MAX_NGPUS] = {0};
long rb[MAX_NGPUS] = {0};
long cb[MAX_NGPUS] = {0};
long rc[MAX_NGPUS] = {0};
long cc[MAX_NGPUS] = {0};
//main loop
{
if(pflag)printf("main loop\n");
long total_iterations = full_waves + (remaining_stripes!=0);
long ngpus_active;
long n_ = (n + tile_size-1) / tile_size;
long k_ = (k + tile_size-1) / tile_size;
// i - loop over full waves (m)
for(long i = 0; i < total_iterations; i++)
{
ngpus_active = ngpus;
if(i == total_iterations-1){if(remaining_stripes != 0) ngpus_active = remaining_stripes;}
// advance A_
if(pflag)printf("i = %ld, advance A_\n", i);
if(transa == 'n' || transa == 'N') for(long id = 0; id < ngpus_active; id++) {A_[id] = (float*)A + (i *ngpus + id) * tile_size;}
else for(long id = 0; id < ngpus_active; id++) { A_[id] = (float*)A + (i * ngpus + id) * tile_size * lda; }
// compute #rows of current tiles in A and C
for(long id = 0; id < ngpus_active; id++) rc[id] = min(m - (i*ngpus+id)*tile_size , tile_size);
if(transa == 'n' || transa == 'N')
for(long id = 0; id < ngpus_active; id++) ra[id] = min(m - (i*ngpus+id)*tile_size , tile_size);
else
for(long id = 0; id < ngpus_active; id++) ca[id] = min(m - (i*ngpus+id)*tile_size , tile_size);
// j - loop over (n) -
for(long j = 0; j < n_ ; j++)
{
if(pflag)printf("\t j = %ld, advance B_ and C_\n", j);
// compute #cols in current tiles in B and C
for(long id = 0; id < ngpus_active; id++) cc[id] = min(n - j*tile_size , tile_size);
if(transb == 'n' || transb == 'N')
for(long id = 0; id < ngpus_active; id++) cb[id] = min(n - j*tile_size , tile_size);
else
for(long id = 0; id < ngpus_active; id++) rb[id] = min(n - j*tile_size , tile_size);
// Advance B_
if(transb == 'n' || transb == 'N') for(long id = 0; id < ngpus_active; id++) {B_[id] = (float*)B + j * tile_size * ldb;}
else for(long id = 0; id < ngpus_active; id++) {B_[id] = (float*)B + j * tile_size;}
// Advance C_
for(long id = 0; id < ngpus_active; id++)
{
//C_[id] = (float*)C + ( (i *ngpus + id) * tile_size ) + ( j * tile_size * ldc);
C_[id] = (float*)C;
//if(transa == 'n' || transa == 'N')
C_[id] += (i *ngpus + id) * tile_size;
//else C_[id] += (i * ngpus + id) * tile_size * ldc;
//if(transb == 'n' || transb == 'N')
C_[id] += j * tile_size * ldc;
//else C_[id] += j * tile_size;
}
// copy device pointers
for(long id = 0; id < ngpus_active; id++)
{
a_[id] = a[id];
b_[id][0] = b[id][0];
b_[id][1] = b[id][1];
c_[id][0] = c[id][0];
c_[id][1] = c[id][1];
}
// if starting to compute new row of tiles in C
// copy the first tile of C in the row into devices
if(j == 0)
{
for(long id = 0; id < ngpus_active; id++)
{
cudaSetDevice(gpu_id[id]);
cudaStreamWaitEvent(stream[id][3], _cout_[id][cselect[id]], 0);
se = cublasSetMatrixAsync(rc[id], cc[id], sizeof(float), C_[id], ldc, c[id][cselect[id]], tile_size, stream[id][3]);
process_error(se, "copy cin new row of tiles");
cudaEventRecord(_cin_[id][cselect[id]], stream[id][3]);
}
}
if(pflag)printf("\t j = %ld, copy a, b tile in\n", j);
// prepare a first input offload
for(long id = 0; id < ngpus_active; id ++)
{
// as if p = 0 (first iteration in the inner-most loop)
if(transa == 'n' || transa == 'N') ca[id] = min(k - 0*tile_size , tile_size);
else ra[id] = min(k - 0*tile_size , tile_size);
if(transb == 'n' || transb == 'N') rb[id] = min(k - 0*tile_size , tile_size);
else cb[id] = min(k - 0*tile_size , tile_size);
cudaSetDevice(gpu_id[id]);
if(j == 0)
{
cudaStreamWaitEvent(stream[id][1], _afree_[id][0], 0);
se = cublasSetMatrixAsync(ra[id], ca[id], sizeof(float), A_[id], lda, a_[id], tile_size, stream[id][1]);
char ss[100];
sprintf(ss, " i =%ld, j = %ld copy ain new row of tiles: [%ld]x[%ld]", i, j, ra[id], ca[id]);
process_error(se, ss);
cudaEventRecord(_ain_[id][0], stream[id][1]);
}
cudaStreamWaitEvent(stream[id][2], _bfree_[id][bselect[id]], 0);
se = cublasSetMatrixAsync(rb[id], cb[id], sizeof(float), B_[id], ldb, b_[id][bselect[id]], tile_size, stream[id][2]);
process_error(se, "copy bin new row of tiles");
cudaEventRecord(_bin_[id][bselect[id]], stream[id][2]);
}
// init b selector
//for(long id = 0; id < ngpus; id++) bselect[id] = 0;
// p - loop over k
long p = 0;
for(p = 0; p < k_; p++)
{
float beta_;
if(p == 0)beta_ = beta; else beta_ = 1;
for(long id = 0; id < ngpus_active; id++)
{
cudaSetDevice(gpu_id[id]);
if(pflag)printf("\t\t p = %ld, wait for communication\n", p);
if(transa == 'n' || transa == 'N') ca[id] = min(k - p*tile_size , tile_size);
else ra[id] = min(k - p*tile_size , tile_size);
if(transb == 'n' || transb == 'N') rb[id] = min(k - p*tile_size , tile_size);
else cb[id] = min(k - p*tile_size , tile_size);
// wait for communication
//if(p == 0)cudaStreamSynchronize(stream[id][3]);
//if(j == 0)cudaStreamSynchronize(stream[id][1]);
//cudaStreamSynchronize(stream[id][2]);
if(p == 0) cudaStreamWaitEvent(stream[id][0], _cin_[id][cselect[id]], 0);
if(j == 0) cudaStreamWaitEvent(stream[id][0], _ain_[id][p], 0);
cudaStreamWaitEvent(stream[id][0], _bin_[id][bselect[id]], 0);
if(pflag)printf("\t\t p = %ld, gpu = %ld, invoke sgemm\n", p, id);
if(pflag)printf("\t\t ------------------------------\n");
if(pflag)printf("\t\t cselect[%ld] = %ld \n", id, cselect[id]);
long msmall = rc[id];
long nsmall = cc[id];
long ksmall;
if(transa == 'n' || transa == 'N') ksmall = ca[id];
else ksmall = ra[id];
//{
// printf("\n");
// printf("gpu %ld: [%ld][%ld] x [%ld][%ld] = [%ld][%ld]\n", id, msmall, ksmall, ksmall, nsmall, msmall, nsmall);
// //print a
// printf("A\n--------\n");
// myprint_matrix(transa, msmall, ksmall, a_[id], tile_size);
// //print b
// printf("B\n--------\n");
// myprint_matrix(transb, ksmall, nsmall, b_[id][bselect[id]], tile_size);
//}
// invoke sgemm
cublasSgemm(transa, transb,
msmall, nsmall, ksmall,
alpha, a_[id], tile_size,
b_[id][bselect[id]], tile_size,
beta_, c_[id][cselect[id]], tile_size);
cudaEventRecord(_bfree_[id][bselect[id]], stream[id][0]);
if(j == n_-1) cudaEventRecord(_afree_[id][p], stream[id][0]);
if(p == k_-1) cudaEventRecord(_compute_[id][j], stream[id][0]);
// prepare next input
bselect[id] = 1 - bselect[id];
a_[id] += tile_size * tile_size;
if(p != k_-1)
{
if(pflag)printf("\t\t p = %ld, prepare next input\n", p);
if(j == 0)
{
A_[id] += step_a;
if(transa == 'n' || transa == 'N')ca[id] = min(k - (p+1)*tile_size, tile_size);
else ra[id] = min(k - (p+1)*tile_size, tile_size);
cudaStreamWaitEvent(stream[id][1], _afree_[id][p+1], 0);
se = cublasSetMatrixAsync(ra[id], ca[id], sizeof(float), A_[id], lda, a_[id], tile_size, stream[id][1]);
process_error(se, "prefetch ain");
cudaEventRecord(_ain_[id][p+1], stream[id][1]);
if(transa == 'n' || transa == 'N')ca[id] = min(k - (p)*tile_size, tile_size);
else ra[id] = min(k - (p)*tile_size, tile_size);
}
B_[id] += step_b;
if(transb == 'n' || transb == 'N') rb[id] = min(k - (p+1)*tile_size, tile_size);
else cb[id] = min(k - (p+1)*tile_size, tile_size);
cudaStreamWaitEvent(stream[id][2], _bfree_[id][bselect[id]], 0);
se = cublasSetMatrixAsync(rb[id], cb[id], sizeof(float), B_[id], ldb, b_[id][bselect[id]], tile_size, stream[id][2]);
process_error(se, "prefetch bin");
cudaEventRecord(_bin_[id][bselect[id]], stream[id][2]);
if(transb == 'n' || transb == 'N') rb[id] = min(k - (p)*tile_size, tile_size);
else cb[id] = min(k - (p)*tile_size, tile_size);
}
if( p == 0)
{
if(j != n_-1)
{
// early copy of the next tile of C
float* Ctmp = C_[id] + tile_size * ldc;
cselect[id] = 1 - cselect[id];
// rc[id] is the same, but we need to compute cc
cc[id] = min(n - (j+1)*tile_size, tile_size);
if(pflag)printf("\t\t cselect[%ld] = %ld \n", id, cselect[id]);
cudaStreamWaitEvent(stream[id][3], _cout_[id][cselect[id]], 0);
se = cublasSetMatrixAsync(rc[id], cc[id], sizeof(float), Ctmp, ldc, c_[id][cselect[id]], tile_size, stream[id][3]);
char ss[100];
sprintf(ss, "gpu[%ld]: prefetch cin [%ld]x[%ld]", id, rc[id], cc[id]);
process_error(se, ss);
cudaEventRecord(_cin_[id][cselect[id]], stream[id][3]);
cselect[id] = 1 - cselect[id];
// restore cc
cc[id] = min(n - j*tile_size, tile_size);
}
}
if(pflag)printf("\n");
}
}// p - loop over k
// copy c into cpu
for(long id = 0; id < ngpus_active; id++)
{
if(pflag)printf("i = %ld, j = %ld, gpu = %ld, copy c output\n", i, j, id);
cudaSetDevice(gpu_id[id]);
//cudaStreamSynchronize(stream[id][0]);
cudaStreamWaitEvent(stream[id][3], _compute_[id][j], 0);
se = cublasGetMatrixAsync(rc[id], cc[id], sizeof(float), c_[id][cselect[id]], tile_size, C_[id], ldc, stream[id][3]);
process_error(se, "read output c");
cudaEventRecord(_cout_[id][cselect[id]], stream[id][3]);
cselect[id] = 1 - cselect[id];
}
}// j - loop over (n)
} // i - loop over full waves (m)
}// main compute part
// global sync point
{
if(pflag)printf("global sync\n");
for(long id = 0; id < ngpus; id++)
{
cudaSetDevice(gpu_id[id]);
cudaDeviceSynchronize();
}
}
// switch cublas streams to the default one
for(long id = 0; id < ngpus; id ++)
{
cudaSetDevice(gpu_id[id]);
cublasSetKernelStream(0);
}
// destroy streams
{
if(pflag)printf("destroy stream\n");
for(long i = 0; i < ngpus; i++)
{
cudaSetDevice(gpu_id[i]);
cudaStreamDestroy(stream[i][0]);
cudaStreamDestroy(stream[i][1]);
cudaStreamDestroy(stream[i][2]);
cudaStreamDestroy(stream[i][3]);
}
}
// destroy events
{
for(long i = 0; i < ngpus; i++)
{
for(long j = 0; j < nevents; j++)
{
cudaEventDestroy(_ain_[i][j]);
cudaEventDestroy(_bin_[i][j]);
cudaEventDestroy(_afree_[i][j]);
cudaEventDestroy(_bfree_[i][j]);
cudaEventDestroy(_compute_[i][j]);
cudaEventDestroy(_cin_[i][j]);
cudaEventDestroy(_cout_[i][j]);
}
}
}
// free resources
{
if(pflag)printf("free resources\n");
for(long i = 0; i < ngpus; i++)
if(gpu_ws[i]) cudaFree(gpu_ws[i]);
}
// retrieve current gpu
cudaSetDevice(current_gpu);
}
|
05591e73cc67713d651043e7ea72653a7f61e7fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
constexpr int THREADS = 1024;
#include <hip/hip_fp16.h>
__global__ void CustomHSquareMulKernel(float *input1, half *input2, half *output, size_t size) {
auto idx = blockIdx.x * THREADS + threadIdx.x;
if (idx < size) {
output[idx] = __float2half(input1[idx] * input1[idx] * __half2float(input2[idx]));
}
}
extern "C" int CustomHSquareMul(int nparam, void **params, int *ndims, int64_t **shapes, const char **dtypes,
void *stream, void *extra) {
hipStream_t custream = static_cast<hipStream_t>(stream);
constexpr int OUTPUT_INDEX = 2;
constexpr int TOTAL_PARAM_NUM = 3;
// Users can add any check on their need. If check fails, user can return any value larger than 0 to safely exit.
// Return value not equal to 0 will cause MindSpore to stop computing and safely exit.
// This is to check if the num of parameters the same as what the user wants.
// There are two inputs and one output, so the nparam should be 3.
if (nparam != TOTAL_PARAM_NUM) {
return 1;
}
// This is to check if the type of parameters the same as what the user wants.
if (strcmp(dtypes[0], "float32") != 0) {
return 2;
}
if (strcmp(dtypes[1], "float16") != 0) {
return 2;
}
if (strcmp(dtypes[2], "float16") != 0) {
return 2;
}
// input1's index is 0, input2's index is 1 and output's index is 2
void *input1 = params[0];
void *input2 = params[1];
void *output = params[2];
size_t size = 1;
// Cumprod of output's shape to compute elements' num
for (int i = 0; i < ndims[OUTPUT_INDEX]; i++) {
size *= shapes[OUTPUT_INDEX][i];
}
int n = size / THREADS;
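// n + 1 blocks of THREADS threads are enough to cover all `size` elements,
// since the integer division rounds down (e.g. size = 2500 gives n = 2 and
// 3 blocks of 1024 threads); the idx < size check in the kernel discards the
// overshoot.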
// Do the computation
hipLaunchKernelGGL(( CustomHSquareMulKernel), dim3(n + 1), dim3(THREADS), 0, custream, static_cast<float *>(input1), static_cast<half *>(input2),
static_cast<half *>(output), size);
// When return 0, MindSpore will continue to run if this kernel could launch successfully.
return 0;
}
| 05591e73cc67713d651043e7ea72653a7f61e7fc.cu | /**
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
constexpr int THREADS = 1024;
#include <cuda_fp16.h>
__global__ void CustomHSquareMulKernel(float *input1, half *input2, half *output, size_t size) {
auto idx = blockIdx.x * THREADS + threadIdx.x;
if (idx < size) {
output[idx] = __float2half(input1[idx] * input1[idx] * __half2float(input2[idx]));
}
}
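// The kernel above handles the mixed precision of its inputs: input1 is
// float and input2 is half, so input2 is widened with __half2float, the
// square-and-multiply is done in float, and the result is narrowed back with
// __float2half before being stored.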
extern "C" int CustomHSquareMul(int nparam, void **params, int *ndims, int64_t **shapes, const char **dtypes,
void *stream, void *extra) {
cudaStream_t custream = static_cast<cudaStream_t>(stream);
constexpr int OUTPUT_INDEX = 2;
constexpr int TOTAL_PARAM_NUM = 3;
// Users can add any check on their need. If check fails, user can return any value larger than 0 to safely exit.
// Return value not equal to 0 will cause MindSpore to stop computing and safely exit.
// This is to check if the num of parameters the same as what the user wants.
// There are two inputs and one output, so the nparam should be 3.
if (nparam != TOTAL_PARAM_NUM) {
return 1;
}
// This is to check if the type of parameters the same as what the user wants.
if (strcmp(dtypes[0], "float32") != 0) {
return 2;
}
if (strcmp(dtypes[1], "float16") != 0) {
return 2;
}
if (strcmp(dtypes[2], "float16") != 0) {
return 2;
}
// input1's index is 0, input2's index is 1 and output's index is 2
void *input1 = params[0];
void *input2 = params[1];
void *output = params[2];
size_t size = 1;
// Cumprod of output's shape to compute elements' num
for (int i = 0; i < ndims[OUTPUT_INDEX]; i++) {
size *= shapes[OUTPUT_INDEX][i];
}
int n = size / THREADS;
// Do the computation
CustomHSquareMulKernel<<<n + 1, THREADS, 0, custream>>>(static_cast<float *>(input1), static_cast<half *>(input2),
static_cast<half *>(output), size);
// When return 0, MindSpore will continue to run if this kernel could launch successfully.
return 0;
}
|
b3724be6fbf4ae80cf0c3fb98e10251fd0a3b86e.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright 2016 Mikko Ronkainen <[email protected]>
// License: MIT, see the LICENSE file.
#include "Precompiled.h"
#ifdef USE_ROCM
#include <hip/hip_runtime.h>
#include "Utils/CudaUtils.h"
#endif
#include "Core/Common.h"
#include "Utils/CudaAlloc.h"
#include "Utils/ColorGradient.h"
#include "Core/Scene.h"
#include "Core/Film.h"
#include "Math/Random.h"
using namespace Valo;
template <typename T>
CudaAlloc<T>::CudaAlloc(bool pinned_) : pinned(pinned_)
{
}
template <typename T>
CudaAlloc<T>::~CudaAlloc()
{
release();
}
template <typename T>
void CudaAlloc<T>::resize(size_t count)
{
assert(count > 0);
release();
maxCount = count;
#ifdef USE_ROCM
if (pinned)
{
CudaUtils::checkError(hipHostMalloc(&hostPtr, sizeof(T) * count), "Could not allocate pinned host memory");
if (hostPtr == nullptr)
throw std::runtime_error("Could not allocate pinned host memory");
}
else
{
hostPtr = static_cast<T*>(malloc(sizeof(T) * count));
if (hostPtr == nullptr)
throw std::runtime_error("Could not allocate host memory");
}
CudaUtils::checkError(hipMalloc(&devicePtr, sizeof(T) * count), "Could not allocate device memory");
if (devicePtr == nullptr)
throw std::runtime_error("Could not allocate device memory");
#else
hostPtr = static_cast<T*>(malloc(sizeof(T) * count));
if (hostPtr == nullptr)
throw std::runtime_error("Could not allocate host memory");
#endif
}
template <typename T>
void CudaAlloc<T>::write(T* source, size_t count)
{
assert(count <= maxCount);
memcpy(hostPtr, source, sizeof(T) * count);
#ifdef USE_ROCM
CudaUtils::checkError(hipMemcpy(devicePtr, hostPtr, sizeof(T) * count, hipMemcpyHostToDevice), "Could not write data to device");
#endif
}
template <typename T>
void CudaAlloc<T>::read(size_t count)
{
(void)count;
assert(count <= maxCount);
#ifdef USE_ROCM
CudaUtils::checkError(hipMemcpy(hostPtr, devicePtr, sizeof(T) * count, hipMemcpyDeviceToHost), "Could not read data from device");
#endif
}
template <typename T>
CUDA_CALLABLE T* CudaAlloc<T>::getPtr() const
{
#ifdef USE_ROCM
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ > 0))
return devicePtr;
#else
return hostPtr;
#endif
#else
return hostPtr;
#endif
}
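// getPtr() is resolved per compilation pass: device code (__CUDA_ARCH__ > 0)
// receives the device pointer, while host code and builds without GPU support
// fall back to the host pointer, so CUDA_CALLABLE callers can use the same
// accessor on either side.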
template <typename T>
T* CudaAlloc<T>::getHostPtr() const
{
return hostPtr;
}
template <typename T>
T* CudaAlloc<T>::getDevicePtr() const
{
return devicePtr;
}
template <typename T>
void CudaAlloc<T>::release()
{
maxCount = 0;
#ifdef USE_ROCM
if (hostPtr != nullptr)
{
if (pinned)
CudaUtils::checkError(hipHostFree(hostPtr), "Could not free pinned host memory");
else
free(hostPtr);
hostPtr = nullptr;
}
if (devicePtr != nullptr)
{
CudaUtils::checkError(hipFree(devicePtr), "Could not free device memory");
devicePtr = nullptr;
}
#else
if (hostPtr != nullptr)
{
free(hostPtr);
hostPtr = nullptr;
}
#endif
}
namespace Valo
{
template class CudaAlloc<uint32_t>;
template class CudaAlloc<Scene>;
template class CudaAlloc<Film>;
template class CudaAlloc<Image>;
template class CudaAlloc<Texture>;
template class CudaAlloc<Material>;
template class CudaAlloc<Triangle>;
template class CudaAlloc<BVHNode>;
template class CudaAlloc<BVHNodeSOA<4>>;
template class CudaAlloc<BVHNodeSOA<8>>;
template class CudaAlloc<BVHNodeSOA<16>>;
template class CudaAlloc<TriangleSOA<4>>;
template class CudaAlloc<TriangleSOA<8>>;
template class CudaAlloc<TriangleSOA<16>>;
template class CudaAlloc<RandomGeneratorState>;
template class CudaAlloc<ColorGradientSegment>;
}
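// Illustrative usage (variable names here are arbitrary, not part of Valo):
// CudaAlloc<uint32_t> pixels(true); // pinned staging buffer
// pixels.resize(pixelCount); // host + device allocation
// pixels.write(hostPixels, pixelCount); // upload to the device
// pixels.read(pixelCount); // download results into getHostPtr()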
| b3724be6fbf4ae80cf0c3fb98e10251fd0a3b86e.cu | // Copyright © 2016 Mikko Ronkainen <[email protected]>
// License: MIT, see the LICENSE file.
#include "Precompiled.h"
#ifdef USE_CUDA
#include <cuda_runtime.h>
#include "Utils/CudaUtils.h"
#endif
#include "Core/Common.h"
#include "Utils/CudaAlloc.h"
#include "Utils/ColorGradient.h"
#include "Core/Scene.h"
#include "Core/Film.h"
#include "Math/Random.h"
using namespace Valo;
template <typename T>
CudaAlloc<T>::CudaAlloc(bool pinned_) : pinned(pinned_)
{
}
template <typename T>
CudaAlloc<T>::~CudaAlloc()
{
release();
}
template <typename T>
void CudaAlloc<T>::resize(size_t count)
{
assert(count > 0);
release();
maxCount = count;
#ifdef USE_CUDA
if (pinned)
{
CudaUtils::checkError(cudaMallocHost(&hostPtr, sizeof(T) * count), "Could not allocate pinned host memory");
if (hostPtr == nullptr)
throw std::runtime_error("Could not allocate pinned host memory");
}
else
{
hostPtr = static_cast<T*>(malloc(sizeof(T) * count));
if (hostPtr == nullptr)
throw std::runtime_error("Could not allocate host memory");
}
CudaUtils::checkError(cudaMalloc(&devicePtr, sizeof(T) * count), "Could not allocate device memory");
if (devicePtr == nullptr)
throw std::runtime_error("Could not allocate device memory");
#else
hostPtr = static_cast<T*>(malloc(sizeof(T) * count));
if (hostPtr == nullptr)
throw std::runtime_error("Could not allocate host memory");
#endif
}
template <typename T>
void CudaAlloc<T>::write(T* source, size_t count)
{
assert(count <= maxCount);
memcpy(hostPtr, source, sizeof(T) * count);
#ifdef USE_CUDA
CudaUtils::checkError(cudaMemcpy(devicePtr, hostPtr, sizeof(T) * count, cudaMemcpyHostToDevice), "Could not write data to device");
#endif
}
template <typename T>
void CudaAlloc<T>::read(size_t count)
{
(void)count;
assert(count <= maxCount);
#ifdef USE_CUDA
CudaUtils::checkError(cudaMemcpy(hostPtr, devicePtr, sizeof(T) * count, cudaMemcpyDeviceToHost), "Could not read data from device");
#endif
}
template <typename T>
CUDA_CALLABLE T* CudaAlloc<T>::getPtr() const
{
#ifdef USE_CUDA
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ > 0))
return devicePtr;
#else
return hostPtr;
#endif
#else
return hostPtr;
#endif
}
template <typename T>
T* CudaAlloc<T>::getHostPtr() const
{
return hostPtr;
}
template <typename T>
T* CudaAlloc<T>::getDevicePtr() const
{
return devicePtr;
}
template <typename T>
void CudaAlloc<T>::release()
{
maxCount = 0;
#ifdef USE_CUDA
if (hostPtr != nullptr)
{
if (pinned)
CudaUtils::checkError(cudaFreeHost(hostPtr), "Could not free pinned host memory");
else
free(hostPtr);
hostPtr = nullptr;
}
if (devicePtr != nullptr)
{
CudaUtils::checkError(cudaFree(devicePtr), "Could not free device memory");
devicePtr = nullptr;
}
#else
if (hostPtr != nullptr)
{
free(hostPtr);
hostPtr = nullptr;
}
#endif
}
namespace Valo
{
template class CudaAlloc<uint32_t>;
template class CudaAlloc<Scene>;
template class CudaAlloc<Film>;
template class CudaAlloc<Image>;
template class CudaAlloc<Texture>;
template class CudaAlloc<Material>;
template class CudaAlloc<Triangle>;
template class CudaAlloc<BVHNode>;
template class CudaAlloc<BVHNodeSOA<4>>;
template class CudaAlloc<BVHNodeSOA<8>>;
template class CudaAlloc<BVHNodeSOA<16>>;
template class CudaAlloc<TriangleSOA<4>>;
template class CudaAlloc<TriangleSOA<8>>;
template class CudaAlloc<TriangleSOA<16>>;
template class CudaAlloc<RandomGeneratorState>;
template class CudaAlloc<ColorGradientSegment>;
}
|
fc62264bbcbe21eff81e7cb279eb0c6f928562dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void SetMatrixVauleMinMaxY( float* matrix, int cols, int size, int id_min, int id_max, float value)
{
int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
int id_row = id / cols;
if (id_row >= id_min && id_row <= id_max && id < size)
matrix[id] = value;
} | fc62264bbcbe21eff81e7cb279eb0c6f928562dd.cu | #include "includes.h"
__global__ void SetMatrixVauleMinMaxY( float* matrix, int cols, int size, int id_min, int id_max, float value)
{
int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
int id_row = id / cols;
if (id_row >= id_min && id_row <= id_max && id < size)
matrix[id] = value;
} |
739165515a696f3704edc8232999f8f2619ed1aa.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sobelEdgeDetectionWithRegisters.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
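// blocks_ holds the 20 thread-block shapes and matrices_ the square image
// sizes swept by the benchmark below; each (block, image) pair is timed over
// 1000 kernel launches after a 10-launch warm-up.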
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
int *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int thresh = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((sobelEdgeDetectionWithRegisters), dim3(gridBlock), dim3(threadBlock), 0, 0, input, output, width, height, thresh);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((sobelEdgeDetectionWithRegisters), dim3(gridBlock), dim3(threadBlock), 0, 0, input, output, width, height, thresh);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((sobelEdgeDetectionWithRegisters), dim3(gridBlock), dim3(threadBlock), 0, 0, input, output, width, height, thresh);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 739165515a696f3704edc8232999f8f2619ed1aa.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sobelEdgeDetectionWithRegisters.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
int *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int thresh = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sobelEdgeDetectionWithRegisters<<<gridBlock,threadBlock>>>(input,output,width,height,thresh);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sobelEdgeDetectionWithRegisters<<<gridBlock,threadBlock>>>(input,output,width,height,thresh);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sobelEdgeDetectionWithRegisters<<<gridBlock,threadBlock>>>(input,output,width,height,thresh);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
bcd87235775156ad86593edb30cfba074c1b12bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/strings/detail/concatenate.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/table/table_device_view.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
#include <thrust/transform_scan.h>
namespace cudf {
namespace strings {
namespace detail {
// Benchmark data, shared at https://github.com/rapidsai/cudf/pull/4703, shows
// that the single kernel optimization generally performs better, but when the
// number of chars/col is beyond a certain threshold memcpy performs better.
// This heuristic estimates which strategy will give better performance by
// comparing the mean chars/col with values from the above table.
constexpr bool use_fused_kernel_heuristic(bool const has_nulls,
size_t const total_bytes,
size_t const num_columns)
{
return has_nulls ? total_bytes < num_columns * 1572864 // midpoint of 1048576 and 2097152
: total_bytes < num_columns * 393216; // midpoint of 262144 and 524288
}
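// Illustrative sanity check (not part of the original cudf source); the literal
// values below just exercise the documented thresholds of the heuristic above.
static_assert(use_fused_kernel_heuristic(true, size_t{4} * 1572863, 4),
              "with nulls, totals below num_columns * ~1.5 MiB choose the fused kernel");
static_assert(!use_fused_kernel_heuristic(false, size_t{4} * 393216, 4),
              "without nulls, totals at num_columns * 384 KiB fall back to memcpy");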
// Using a functor instead of a lambda as a workaround for:
// error: The enclosing parent function ("create_strings_device_views") for an
// extended __device__ lambda must not have deduced return type
struct chars_size_transform {
__device__ size_t operator()(column_device_view const& col) const
{
if (col.size() > 0) {
constexpr auto offsets_index = strings_column_view::offsets_column_index;
auto d_offsets = col.child(offsets_index).data<int32_t>();
return d_offsets[col.size() + col.offset()] - d_offsets[col.offset()];
} else {
return 0;
}
}
};
auto create_strings_device_views(host_span<column_view const> views, rmm::cuda_stream_view stream)
{
CUDF_FUNC_RANGE();
// Assemble contiguous array of device views
std::unique_ptr<rmm::device_buffer> device_view_owners;
column_device_view* device_views_ptr;
std::tie(device_view_owners, device_views_ptr) =
contiguous_copy_column_device_views<column_device_view>(views, stream);
// Compute the partition offsets and size of offset column
// Note: Using 64-bit size_t so we can detect overflow of 32-bit size_type
auto input_offsets = std::vector<size_t>(views.size() + 1);
auto offset_it = std::next(input_offsets.begin());
thrust::transform(
thrust::host, views.begin(), views.end(), offset_it, [](auto const& col) -> size_t {
return static_cast<size_t>(col.size());
});
thrust::inclusive_scan(thrust::host, offset_it, input_offsets.end(), offset_it);
auto d_input_offsets = cudf::detail::make_device_uvector_async(input_offsets, stream);
auto const output_size = input_offsets.back();
// Compute the partition offsets and size of chars column
// Note: Using 64-bit size_t so we can detect overflow of 32-bit size_type
auto d_partition_offsets = rmm::device_uvector<size_t>(views.size() + 1, stream);
d_partition_offsets.set_element_to_zero_async(0, stream); // zero first element
thrust::transform_inclusive_scan(rmm::exec_policy(stream),
device_views_ptr,
device_views_ptr + views.size(),
std::next(d_partition_offsets.begin()),
chars_size_transform{},
thrust::plus<size_t>{});
auto const output_chars_size = d_partition_offsets.back_element(stream);
stream.synchronize(); // ensure copy of output_chars_size is complete before returning
return std::make_tuple(std::move(device_view_owners),
device_views_ptr,
std::move(d_input_offsets),
std::move(d_partition_offsets),
output_size,
output_chars_size);
}
template <size_type block_size, bool Nullable>
__global__ void fused_concatenate_string_offset_kernel(column_device_view const* input_views,
size_t const* input_offsets,
size_t const* partition_offsets,
size_type const num_input_views,
size_type const output_size,
int32_t* output_data,
bitmask_type* output_mask,
size_type* out_valid_count)
{
size_type output_index = threadIdx.x + blockIdx.x * blockDim.x;
size_type warp_valid_count = 0;
unsigned active_mask;
if (Nullable) { active_mask = __ballot_sync(0xFFFF'FFFF, output_index < output_size); }
while (output_index < output_size) {
// Lookup input index by searching for output index in offsets
// thrust::prev isn't in CUDA 10.0, so subtracting 1 here instead
auto const offset_it =
-1 + thrust::upper_bound(
thrust::seq, input_offsets, input_offsets + num_input_views, output_index);
size_type const partition_index = offset_it - input_offsets;
auto const offset_index = output_index - *offset_it;
auto const& input_view = input_views[partition_index];
constexpr auto offsets_child = strings_column_view::offsets_column_index;
auto const* input_data = input_view.child(offsets_child).data<int32_t>();
output_data[output_index] =
input_data[offset_index + input_view.offset()] // handle parent offset
- input_data[input_view.offset()] // subtract first offset if non-zero
+ partition_offsets[partition_index]; // add offset of source column
if (Nullable) {
bool const bit_is_set = input_view.is_valid(offset_index);
bitmask_type const new_word = __ballot_sync(active_mask, bit_is_set);
// First thread writes bitmask word
if (threadIdx.x % cudf::detail::warp_size == 0) {
output_mask[word_index(output_index)] = new_word;
}
warp_valid_count += __popc(new_word);
}
output_index += blockDim.x * gridDim.x;
if (Nullable) { active_mask = __ballot_sync(active_mask, output_index < output_size); }
}
// Fill final offsets index with total size of char data
if (output_index == output_size) {
output_data[output_size] = partition_offsets[num_input_views];
}
if (Nullable) {
using cudf::detail::single_lane_block_sum_reduce;
auto block_valid_count = single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
if (threadIdx.x == 0) { atomicAdd(out_valid_count, block_valid_count); }
}
}
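// Illustrative note (not part of the original cudf source): each output row finds its
// source column via the upper_bound over cumulative row counts above; e.g. with
// input_offsets = {0, 3, 7, 12} an output_index of 5 maps to partition_index 1 and
// local offset_index 5 - 3 = 2.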
__global__ void fused_concatenate_string_chars_kernel(column_device_view const* input_views,
size_t const* partition_offsets,
size_type const num_input_views,
size_type const output_size,
char* output_data)
{
size_type output_index = threadIdx.x + blockIdx.x * blockDim.x;
while (output_index < output_size) {
// Lookup input index by searching for output index in offsets
// thrust::prev isn't in CUDA 10.0, so subtracting 1 here instead
auto const offset_it =
-1 + thrust::upper_bound(
thrust::seq, partition_offsets, partition_offsets + num_input_views, output_index);
size_type const partition_index = offset_it - partition_offsets;
auto const offset_index = output_index - *offset_it;
auto const& input_view = input_views[partition_index];
constexpr auto offsets_child = strings_column_view::offsets_column_index;
auto const* input_offsets_data = input_view.child(offsets_child).data<int32_t>();
constexpr auto chars_child = strings_column_view::chars_column_index;
auto const* input_chars_data = input_view.child(chars_child).data<char>();
auto const first_char = input_offsets_data[input_view.offset()];
output_data[output_index] = input_chars_data[offset_index + first_char];
output_index += blockDim.x * gridDim.x;
}
}
std::unique_ptr<column> concatenate(host_span<column_view const> columns,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
// Compute output sizes
auto const device_views = create_strings_device_views(columns, stream);
auto const& d_views = std::get<1>(device_views);
auto const& d_input_offsets = std::get<2>(device_views);
auto const& d_partition_offsets = std::get<3>(device_views);
auto const strings_count = std::get<4>(device_views);
auto const total_bytes = std::get<5>(device_views);
auto const offsets_count = strings_count + 1;
if (strings_count == 0) { return make_empty_column(type_id::STRING); }
CUDF_EXPECTS(offsets_count <= static_cast<std::size_t>(std::numeric_limits<size_type>::max()),
"total number of strings is too large for cudf column");
CUDF_EXPECTS(total_bytes <= static_cast<std::size_t>(std::numeric_limits<size_type>::max()),
"total size of strings is too large for cudf column");
bool const has_nulls =
std::any_of(columns.begin(), columns.end(), [](auto const& col) { return col.has_nulls(); });
// create chars column
auto chars_column = create_chars_child_column(total_bytes, stream, mr);
auto d_new_chars = chars_column->mutable_view().data<char>();
chars_column->set_null_count(0);
// create offsets column
auto offsets_column = make_numeric_column(
data_type{type_id::INT32}, offsets_count, mask_state::UNALLOCATED, stream, mr);
auto d_new_offsets = offsets_column->mutable_view().data<int32_t>();
offsets_column->set_null_count(0);
rmm::device_buffer null_mask{0, stream, mr};
size_type null_count{};
if (has_nulls) {
null_mask =
cudf::detail::create_null_mask(strings_count, mask_state::UNINITIALIZED, stream, mr);
}
{ // Copy offsets columns with single kernel launch
rmm::device_scalar<size_type> d_valid_count(0, stream);
constexpr size_type block_size{256};
cudf::detail::grid_1d config(offsets_count, block_size);
auto const kernel = has_nulls ? fused_concatenate_string_offset_kernel<block_size, true>
: fused_concatenate_string_offset_kernel<block_size, false>;
hipLaunchKernelGGL(( kernel), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream.value(),
d_views,
d_input_offsets.data(),
d_partition_offsets.data(),
static_cast<size_type>(columns.size()),
strings_count,
d_new_offsets,
reinterpret_cast<bitmask_type*>(null_mask.data()),
d_valid_count.data());
if (has_nulls) { null_count = strings_count - d_valid_count.value(stream); }
}
if (total_bytes > 0) {
// Use a heuristic to guess when the fused kernel will be faster than memcpy
if (use_fused_kernel_heuristic(has_nulls, total_bytes, columns.size())) {
// Use single kernel launch to copy chars columns
constexpr size_type block_size{256};
cudf::detail::grid_1d config(total_bytes, block_size);
auto const kernel = fused_concatenate_string_chars_kernel;
hipLaunchKernelGGL(( kernel), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream.value(),
d_views,
d_partition_offsets.data(),
static_cast<size_type>(columns.size()),
total_bytes,
d_new_chars);
} else {
// Memcpy each input chars column (more efficient for very large strings)
for (auto column = columns.begin(); column != columns.end(); ++column) {
size_type column_size = column->size();
if (column_size == 0) // nothing to do
continue; // empty column may not have children
size_type column_offset = column->offset();
column_view offsets_child = column->child(strings_column_view::offsets_column_index);
column_view chars_child = column->child(strings_column_view::chars_column_index);
auto bytes_offset =
cudf::detail::get_value<offset_type>(offsets_child, column_offset, stream);
// copy the chars column data
auto d_chars = chars_child.data<char>() + bytes_offset;
auto const bytes =
cudf::detail::get_value<offset_type>(offsets_child, column_size + column_offset, stream) -
bytes_offset;
CUDA_TRY(
hipMemcpyAsync(d_new_chars, d_chars, bytes, hipMemcpyDeviceToDevice, stream.value()));
// get ready for the next column
d_new_chars += bytes;
}
}
}
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
null_count,
std::move(null_mask));
}
} // namespace detail
} // namespace strings
} // namespace cudf
| bcd87235775156ad86593edb30cfba074c1b12bb.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/strings/detail/concatenate.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/table/table_device_view.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
#include <thrust/transform_scan.h>
namespace cudf {
namespace strings {
namespace detail {
// Benchmark data, shared at https://github.com/rapidsai/cudf/pull/4703, shows
// that the single kernel optimization generally performs better, but when the
// number of chars/col is beyond a certain threshold memcpy performs better.
// This heuristic estimates which strategy will give better performance by
// comparing the mean chars/col with values from the above table.
constexpr bool use_fused_kernel_heuristic(bool const has_nulls,
size_t const total_bytes,
size_t const num_columns)
{
return has_nulls ? total_bytes < num_columns * 1572864 // midpoint of 1048576 and 2097152
: total_bytes < num_columns * 393216; // midpoint of 262144 and 524288
}
// Using a functor instead of a lambda as a workaround for:
// error: The enclosing parent function ("create_strings_device_views") for an
// extended __device__ lambda must not have deduced return type
struct chars_size_transform {
__device__ size_t operator()(column_device_view const& col) const
{
if (col.size() > 0) {
constexpr auto offsets_index = strings_column_view::offsets_column_index;
auto d_offsets = col.child(offsets_index).data<int32_t>();
return d_offsets[col.size() + col.offset()] - d_offsets[col.offset()];
} else {
return 0;
}
}
};
auto create_strings_device_views(host_span<column_view const> views, rmm::cuda_stream_view stream)
{
CUDF_FUNC_RANGE();
// Assemble contiguous array of device views
std::unique_ptr<rmm::device_buffer> device_view_owners;
column_device_view* device_views_ptr;
std::tie(device_view_owners, device_views_ptr) =
contiguous_copy_column_device_views<column_device_view>(views, stream);
// Compute the partition offsets and size of offset column
// Note: Using 64-bit size_t so we can detect overflow of 32-bit size_type
auto input_offsets = std::vector<size_t>(views.size() + 1);
auto offset_it = std::next(input_offsets.begin());
thrust::transform(
thrust::host, views.begin(), views.end(), offset_it, [](auto const& col) -> size_t {
return static_cast<size_t>(col.size());
});
thrust::inclusive_scan(thrust::host, offset_it, input_offsets.end(), offset_it);
auto d_input_offsets = cudf::detail::make_device_uvector_async(input_offsets, stream);
auto const output_size = input_offsets.back();
// Compute the partition offsets and size of chars column
// Note: Using 64-bit size_t so we can detect overflow of 32-bit size_type
auto d_partition_offsets = rmm::device_uvector<size_t>(views.size() + 1, stream);
d_partition_offsets.set_element_to_zero_async(0, stream); // zero first element
thrust::transform_inclusive_scan(rmm::exec_policy(stream),
device_views_ptr,
device_views_ptr + views.size(),
std::next(d_partition_offsets.begin()),
chars_size_transform{},
thrust::plus<size_t>{});
auto const output_chars_size = d_partition_offsets.back_element(stream);
stream.synchronize(); // ensure copy of output_chars_size is complete before returning
return std::make_tuple(std::move(device_view_owners),
device_views_ptr,
std::move(d_input_offsets),
std::move(d_partition_offsets),
output_size,
output_chars_size);
}
template <size_type block_size, bool Nullable>
__global__ void fused_concatenate_string_offset_kernel(column_device_view const* input_views,
size_t const* input_offsets,
size_t const* partition_offsets,
size_type const num_input_views,
size_type const output_size,
int32_t* output_data,
bitmask_type* output_mask,
size_type* out_valid_count)
{
size_type output_index = threadIdx.x + blockIdx.x * blockDim.x;
size_type warp_valid_count = 0;
unsigned active_mask;
if (Nullable) { active_mask = __ballot_sync(0xFFFF'FFFF, output_index < output_size); }
while (output_index < output_size) {
// Lookup input index by searching for output index in offsets
// thrust::prev isn't in CUDA 10.0, so subtracting 1 here instead
auto const offset_it =
-1 + thrust::upper_bound(
thrust::seq, input_offsets, input_offsets + num_input_views, output_index);
size_type const partition_index = offset_it - input_offsets;
auto const offset_index = output_index - *offset_it;
auto const& input_view = input_views[partition_index];
constexpr auto offsets_child = strings_column_view::offsets_column_index;
auto const* input_data = input_view.child(offsets_child).data<int32_t>();
output_data[output_index] =
input_data[offset_index + input_view.offset()] // handle parent offset
- input_data[input_view.offset()] // subtract first offset if non-zero
+ partition_offsets[partition_index]; // add offset of source column
if (Nullable) {
bool const bit_is_set = input_view.is_valid(offset_index);
bitmask_type const new_word = __ballot_sync(active_mask, bit_is_set);
// First thread writes bitmask word
if (threadIdx.x % cudf::detail::warp_size == 0) {
output_mask[word_index(output_index)] = new_word;
}
warp_valid_count += __popc(new_word);
}
output_index += blockDim.x * gridDim.x;
if (Nullable) { active_mask = __ballot_sync(active_mask, output_index < output_size); }
}
// Fill final offsets index with total size of char data
if (output_index == output_size) {
output_data[output_size] = partition_offsets[num_input_views];
}
if (Nullable) {
using cudf::detail::single_lane_block_sum_reduce;
auto block_valid_count = single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
if (threadIdx.x == 0) { atomicAdd(out_valid_count, block_valid_count); }
}
}
__global__ void fused_concatenate_string_chars_kernel(column_device_view const* input_views,
size_t const* partition_offsets,
size_type const num_input_views,
size_type const output_size,
char* output_data)
{
size_type output_index = threadIdx.x + blockIdx.x * blockDim.x;
while (output_index < output_size) {
// Lookup input index by searching for output index in offsets
// thrust::prev isn't in CUDA 10.0, so subtracting 1 here instead
auto const offset_it =
-1 + thrust::upper_bound(
thrust::seq, partition_offsets, partition_offsets + num_input_views, output_index);
size_type const partition_index = offset_it - partition_offsets;
auto const offset_index = output_index - *offset_it;
auto const& input_view = input_views[partition_index];
constexpr auto offsets_child = strings_column_view::offsets_column_index;
auto const* input_offsets_data = input_view.child(offsets_child).data<int32_t>();
constexpr auto chars_child = strings_column_view::chars_column_index;
auto const* input_chars_data = input_view.child(chars_child).data<char>();
auto const first_char = input_offsets_data[input_view.offset()];
output_data[output_index] = input_chars_data[offset_index + first_char];
output_index += blockDim.x * gridDim.x;
}
}
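// Illustrative note (not part of the original cudf source): the chars kernel above reuses
// the same upper_bound lookup, but over cumulative *byte* offsets, so each iteration of
// the grid-stride loop copies exactly one character of the concatenated output.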
std::unique_ptr<column> concatenate(host_span<column_view const> columns,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
// Compute output sizes
auto const device_views = create_strings_device_views(columns, stream);
auto const& d_views = std::get<1>(device_views);
auto const& d_input_offsets = std::get<2>(device_views);
auto const& d_partition_offsets = std::get<3>(device_views);
auto const strings_count = std::get<4>(device_views);
auto const total_bytes = std::get<5>(device_views);
auto const offsets_count = strings_count + 1;
if (strings_count == 0) { return make_empty_column(type_id::STRING); }
CUDF_EXPECTS(offsets_count <= static_cast<std::size_t>(std::numeric_limits<size_type>::max()),
"total number of strings is too large for cudf column");
CUDF_EXPECTS(total_bytes <= static_cast<std::size_t>(std::numeric_limits<size_type>::max()),
"total size of strings is too large for cudf column");
bool const has_nulls =
std::any_of(columns.begin(), columns.end(), [](auto const& col) { return col.has_nulls(); });
// create chars column
auto chars_column = create_chars_child_column(total_bytes, stream, mr);
auto d_new_chars = chars_column->mutable_view().data<char>();
chars_column->set_null_count(0);
// create offsets column
auto offsets_column = make_numeric_column(
data_type{type_id::INT32}, offsets_count, mask_state::UNALLOCATED, stream, mr);
auto d_new_offsets = offsets_column->mutable_view().data<int32_t>();
offsets_column->set_null_count(0);
rmm::device_buffer null_mask{0, stream, mr};
size_type null_count{};
if (has_nulls) {
null_mask =
cudf::detail::create_null_mask(strings_count, mask_state::UNINITIALIZED, stream, mr);
}
{ // Copy offsets columns with single kernel launch
rmm::device_scalar<size_type> d_valid_count(0, stream);
constexpr size_type block_size{256};
cudf::detail::grid_1d config(offsets_count, block_size);
auto const kernel = has_nulls ? fused_concatenate_string_offset_kernel<block_size, true>
: fused_concatenate_string_offset_kernel<block_size, false>;
kernel<<<config.num_blocks, config.num_threads_per_block, 0, stream.value()>>>(
d_views,
d_input_offsets.data(),
d_partition_offsets.data(),
static_cast<size_type>(columns.size()),
strings_count,
d_new_offsets,
reinterpret_cast<bitmask_type*>(null_mask.data()),
d_valid_count.data());
if (has_nulls) { null_count = strings_count - d_valid_count.value(stream); }
}
if (total_bytes > 0) {
// Use a heuristic to guess when the fused kernel will be faster than memcpy
if (use_fused_kernel_heuristic(has_nulls, total_bytes, columns.size())) {
// Use single kernel launch to copy chars columns
constexpr size_type block_size{256};
cudf::detail::grid_1d config(total_bytes, block_size);
auto const kernel = fused_concatenate_string_chars_kernel;
kernel<<<config.num_blocks, config.num_threads_per_block, 0, stream.value()>>>(
d_views,
d_partition_offsets.data(),
static_cast<size_type>(columns.size()),
total_bytes,
d_new_chars);
} else {
// Memcpy each input chars column (more efficient for very large strings)
for (auto column = columns.begin(); column != columns.end(); ++column) {
size_type column_size = column->size();
if (column_size == 0) // nothing to do
continue; // empty column may not have children
size_type column_offset = column->offset();
column_view offsets_child = column->child(strings_column_view::offsets_column_index);
column_view chars_child = column->child(strings_column_view::chars_column_index);
auto bytes_offset =
cudf::detail::get_value<offset_type>(offsets_child, column_offset, stream);
// copy the chars column data
auto d_chars = chars_child.data<char>() + bytes_offset;
auto const bytes =
cudf::detail::get_value<offset_type>(offsets_child, column_size + column_offset, stream) -
bytes_offset;
CUDA_TRY(
cudaMemcpyAsync(d_new_chars, d_chars, bytes, cudaMemcpyDeviceToDevice, stream.value()));
// get ready for the next column
d_new_chars += bytes;
}
}
}
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
null_count,
std::move(null_mask));
}
} // namespace detail
} // namespace strings
} // namespace cudf
|
4404151defecef6992aa32bd2508d80ac2badb02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <sys/stat.h>
#include <dirent.h>
#include <string.h>
#include <stdio.h>
#include <time.h>
#include <string>
//#include <pthread.h>
//#include "thpool.h"
struct solver { // The variables in the struct are described in the allocate procedure
int* DB, nVars, nClauses, mem_used, mem_fixed, mem_max, maxLemmas, nLemmas, * buffer, nConflicts, * model,
* reason, * falseStack, * _false, * first, * forced, * processed, * assigned, * next, * prev, head, res, fast, slow,
result, file_id;
};
typedef struct {
int files_count;
double parse_time;
double init_time;
double solve_time;
double tot_time;
} Metrics;
enum { END = -9, UNSAT = 0, SAT = 1, MARK = 2, IMPLIED = 6 };
void deviceInfo(){
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
void showMem(){
// show memory usage of GPU
size_t free_byte ;
size_t total_byte ;
hipError_t cuda_status = hipMemGetInfo( &free_byte, &total_byte ) ;
if ( hipSuccess != cuda_status ){
printf("Error: hipMemGetInfo fails, %s \n", hipGetErrorString(cuda_status) );
exit(1);
}
double free_db = (double)free_byte ;
double total_db = (double)total_byte ;
double used_db = total_db - free_db ;
printf("GPU memory usage: used = %f, free = %f MB, total = %f MB\n",
used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0);
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char* file, int line, bool abort = true)
{
if (code != hipSuccess)
{
//showMem();
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__device__
int* getMemory(struct solver* S, int mem_size) { // Allocate memory of size mem_size
if (S->mem_used + mem_size > S->mem_max) { // In case the code is used within a code base
printf("c out of memory\n");
return 0;
}
int* store = (S->DB + S->mem_used); // Compute a pointer to the new memory location
S->mem_used += mem_size; // Update the size of the used memory
return store;
} // Return the pointer
__device__
void unassign(struct solver* S, int lit) { S->_false[lit] = 0; } // Unassign the literal
__device__
void restart(struct solver* S) { // Perform a restart (i.e., unassign all variables)
while (S->assigned > S->forced) unassign(S, *(--S->assigned)); // Remove all unforced false lits from falseStack
S->processed = S->forced;
} // Reset the processed pointer
__device__
void assign(struct solver* S, int* reason, int forced) { // Make the first literal of the reason true
int lit = reason[0]; // Let lit be the first literal in the reason
S->_false[-lit] = forced ? IMPLIED : 1; // Mark lit as true and IMPLIED if forced
*(S->assigned++) = -lit; // Push it on the assignment stack
S->reason[abs(lit)] = 1 + (int)((reason)-S->DB); // Set the reason clause of lit
S->model[abs(lit)] = (lit > 0);
} // Mark the literal as true in the model
__device__
void addWatch(struct solver* S, int lit, int mem) { // Add a watch pointer to a clause containing lit
S->DB[mem] = S->first[lit]; S->first[lit] = mem;
} // By updating the database and the pointers
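// Illustrative sketch (not part of the original solver): watch lists are intrusive
// singly linked lists threaded through DB itself. S->first[lit] holds the DB offset
// of the first watch field of a clause watching lit, and DB[offset] links to the
// next one, so visiting every clause that watches lit looks like:
//
//   for (int w = S->first[lit]; w != END; w = S->DB[w]) {
//     /* w is the offset of a watch field inside a clause watching lit */
//   }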
__device__
int* addClause(struct solver* S, int* buffer, int size, int irr) { // Adds a clause stored in *in of size size
int i, used = S->mem_used; // Store a pointer to the beginning of the clause
int* clause = getMemory(S, size + 3) + 2; // Allocate memory for the clause in the database
if (size > 1) {
addWatch(S, buffer[0], used); // If the clause is not unit, then add
addWatch(S, buffer[1], used + 1);
} // Two watch pointers to the datastructure
for (i = 0; i < size; i++) clause[i] = buffer[i]; clause[i] = 0; // Copy the clause from the buffer to the database
if (irr) S->mem_fixed = S->mem_used; else S->nLemmas++; // Update the statistics
return clause;
} // Return the pointer to the clause in the database
__device__
void reduceDB(struct solver* S, int k) { // Removes "less useful" lemmas from DB
//printf("Start reduceDB function\n");
while (S->nLemmas > S->maxLemmas) S->maxLemmas += 300; // Allow more lemmas in the future
S->nLemmas = 0; // Reset the number of lemmas
int i; for (i = -S->nVars; i <= S->nVars; i++) { // Loop over the variables
if (i == 0) continue; int* watch = &S->first[i]; // Get the pointer to the first watched clause
while (*watch != END) // As long as there are watched clauses
if (*watch < S->mem_fixed) watch = (S->DB + *watch); // Remove the watch if it points to a lemma
else *watch = S->DB[*watch];
} // Otherwise (meaning an input clause) go to next watch
int old_used = S->mem_used; S->mem_used = S->mem_fixed; // Virtually remove all lemmas
for (i = S->mem_fixed + 2; i < old_used; i += 3) { // While the old memory contains lemmas
int count = 0, head = i; // Get the lemma to which the head is pointing
while (S->DB[i]) {
int lit = S->DB[i++]; // Count the number of literals
if ((lit > 0) == S->model[abs(lit)]) count++;
} // That are satisfied by the current model
if (count < k) addClause(S, S->DB + head, i - head, 0);
}
} // If the latter is smaller than k, add it back
__device__
void bump(struct solver* S, int lit) { // Move the variable to the front of the decision list
//printf("Start bump function\n");
if (S->_false[lit] != IMPLIED) {
S->_false[lit] = MARK; // MARK the literal as involved if not a top-level unit
int var = abs(lit); if (var != S->head) { // In case var is not already the head of the list
S->prev[S->next[var]] = S->prev[var]; // Update the prev link, and
S->next[S->prev[var]] = S->next[var]; // Update the next link, and
S->next[S->head] = var; // Add a next link to the head, and
S->prev[var] = S->head; S->head = var;
}
}
} // Make var the new head
__device__
int implied(struct solver* S, int lit) { // Check if lit(eral) is implied by MARK literals
// printf("Start implied function\n");
if (S->_false[lit] > MARK) return (S->_false[lit] & MARK); // If checked before return old result
if (!S->reason[abs(lit)]) return 0; // In case lit is a decision, it is not implied
int* p = (S->DB + S->reason[abs(lit)] - 1); // Get the reason of lit(eral)
while (*(++p)) // While there are literals in the reason
if ((S->_false[*p] ^ MARK) && !implied(S, *p)) { // Recursively check if non-MARK literals are implied
S->_false[lit] = IMPLIED - 1; return 0;
} // Mark and return not implied (denoted by IMPLIED - 1)
S->_false[lit] = IMPLIED; return 1;
} // Mark and return that the literal is implied
__device__
int* analyze(struct solver* S, int* clause) { // Compute a resolvent from falsified clause
// printf("Start analyze\n");
S->res++; S->nConflicts++; // Bump restarts and update the statistic
while (*clause) bump(S, *(clause++)); // MARK all literals in the falsified clause
while (S->reason[abs(*(--S->assigned))]) { // Loop on variables on falseStack until the last decision
if (S->_false[*S->assigned] == MARK) { // If the tail of the stack is MARK
int* check = S->assigned; // Pointer to check if first-UIP is reached
while (S->_false[*(--check)] != MARK) // Check for a MARK literal before decision
if (!S->reason[abs(*check)]) goto build; // Otherwise it is the first-UIP so break
clause = S->DB + S->reason[abs(*S->assigned)]; // Get the reason and ignore first literal
while (*clause) bump(S, *(clause++));
} // MARK all literals in reason
unassign(S, *S->assigned);
} // Unassign the tail of the stack
build:; int size = 0, lbd = 0, flag = 0; // Build conflict clause; Empty the clause buffer
int* p = S->processed = S->assigned; // Loop from tail to front
while (p >= S->forced) { // Only literals on the stack can be MARKed
if ((S->_false[*p] == MARK) && !implied(S, *p)) { // If MARKed and not implied
S->buffer[size++] = *p; flag = 1;
} // Add literal to conflict clause buffer
if (!S->reason[abs(*p)]) {
lbd += flag; flag = 0; // Increase LBD for a decision with a true flag
if (size == 1) S->processed = p;
} // And update the processed pointer
S->_false[*(p--)] = 1;
} // Reset the MARK flag for all variables on the stack
S->fast -= S->fast >> 5; S->fast += lbd << 15; // Update the fast moving average
S->slow -= S->slow >> 15; S->slow += lbd << 5; // Update the slow moving average
while (S->assigned > S->processed) // Loop over all unprocessed literals
unassign(S, *(S->assigned--)); // Unassign all lits between tail & head
unassign(S, *S->assigned); // Assigned now equal to processed
S->buffer[size] = 0; // Terminate the buffer (and potentially print clause)
return addClause(S, S->buffer, size, 0);
} // Add new conflict clause to redundant DB
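// Note (illustrative, not in the original solver): the shift updates above keep two
// exponential moving averages of the LBD scores, roughly
//   fast ~ fast * 31/32 + lbd * 2^15   and   slow ~ slow * 32767/32768 + lbd * 2^5,
// which settle to comparable magnitudes; the solve loop restarts when fast exceeds
// slow by about 25%.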
__device__
int propagate(struct solver* S) { // Performs unit propagation
int forced = S->reason[abs(*S->processed)]; // Initialize forced flag
while (S->processed < S->assigned) { // While unprocessed false literals
int lit = *(S->processed++); // Get first unprocessed literal
int* watch = &S->first[lit]; // Obtain the first watch pointer
while (*watch != END) { // While there are watched clauses (watched by lit)
int i, unit = 1; // Let's assume that the clause is unit
int* clause = (S->DB + *watch + 1); // Get the clause from DB
if (clause[-2] == 0) clause++; // Set the pointer to the first literal in the clause
if (clause[0] == lit) clause[0] = clause[1]; // Ensure that the other watched literal is in front
for (i = 2; unit && clause[i]; i++) // Scan the non-watched literals
if (!S->_false[clause[i]]) { // When clause[i] is not false, it is either true or unset
clause[1] = clause[i]; clause[i] = lit; // Swap literals
int store = *watch; unit = 0; // Store the old watch
*watch = S->DB[*watch]; // Remove the watch from the list of lit
//printf("add watch\n");
addWatch(S, clause[1], store);
} // Add the watch to the list of clause[1]
if (unit) { // If the clause is indeed unit
//printf("unit\n");
clause[1] = lit; watch = (S->DB + *watch); // Place lit at clause[1] and update next watch
if (S->_false[-clause[0]]) continue; // If the other watched literal is satisfied continue
if (!S->_false[clause[0]]) { // If the other watched literal is falsified,
assign(S, clause, forced);
} // A unit clause is found, and the reason is set
else {
if (forced) { // Found a root level conflict -> UNSAT
//S->result = 0;
return UNSAT;
}
int* lemma = analyze(S, clause); // Analyze the conflict return a conflict clause
if (!lemma[1]) forced = 1; // In case a unit clause is found, set forced flag
assign(S, lemma, forced); break;
}
}
}
} // Assign the conflict clause as a unit
if (forced) S->forced = S->processed; // Set S->forced if applicable
//S->result = 1;
return SAT;
} // Finally, no conflict was found
__global__
void solve(struct solver** multi_s) { // Determine satisfiability
struct solver* S = multi_s[blockIdx.x];
int decision = S->head; S->res = 0; // Initialize the solver
for (;;) { // Main solve loop
int old_nLemmas = S->nLemmas; // Store nLemmas to see whether propagate adds lemmas
int res = propagate(S);
if (res == UNSAT) {
printf("file_%d=UNSAT,vars=%i,clauses=%i,mem=%i,conflicts=%i,lemmas=%i\n", S->file_id, S->nVars, S->nClauses, S->mem_used, S->nConflicts, S->maxLemmas);
S->result = UNSAT; // record the result on this block's solver (the kernel runs one thread per block)
//printf("result -->", S->result);
return;
} // Propagation returns UNSAT for a root level conflict
if (S->nLemmas > old_nLemmas) { // If the last decision caused a conflict
decision = S->head; // Reset the decision heuristic to head
if (S->fast > (S->slow / 100) * 125) { // If fast average is substantially larger than slow average
// printf("c restarting after %i conflicts (%i %i) %i\n", S->res, S->fast, S->slow, S->nLemmas > S->maxLemmas);
S->res = 0; S->fast = (S->slow / 100) * 125; restart(S); // Restart and update the averages
if (S->nLemmas > S->maxLemmas) reduceDB(S, 6);
}
} // Reduce the DB when it contains too many lemmas
while (S->_false[decision] || S->_false[-decision]) { // As long as the temporary decision is assigned
decision = S->prev[decision];
}
//printf("decision: %d \n", decision); // Replace it with the next variable in the decision list
if (decision == 0) {
printf("file_%d=SAT,vars=%i,clauses=%i,mem=%i,conflicts=%i,lemmas=%i\n", S->file_id,S->nVars,S->nClauses,S->mem_used,S->nConflicts,S->maxLemmas);
S->result = SAT; // record the result on this block's solver (the kernel runs one thread per block)
//printf("result -->", S->result );
return; // If the end of the list is reached, then a solution is found
}
decision = S->model[decision] ? decision : -decision; // Otherwise, assign the decision variable based on the model
S->_false[-decision] = 1; // Assign the decision literal to true (change to IMPLIED-1?)
*(S->assigned++) = -decision; // And push it on the assigned stack
decision = abs(decision); S->reason[decision] = 0;
}
} // Decisions have no reason clauses
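// Usage note (illustrative, not in the original source): solve() is written for one
// CNF instance per block with a single thread per block, e.g. a launch of the form
//   solve<<<num_instances, 1>>>(d_multi_struct);
// so blockIdx.x selects the per-instance solver state prepared by init().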
__global__
void init(struct solver* S, int* dev_elements, int nElements, int nVars, int nClauses, int* db,
int* file_id, int DB_MAX_MEM, int CLAUSE_LEARN_MAX_MEM, int INITIAL_MAX_LEMMAS) {
S->file_id = *file_id;
S->nVars = nVars;
S->nClauses = nClauses;
//S->mem_max = 100000; // Set the initial maximum memory
S->mem_max = DB_MAX_MEM; // Set the initial maximum memory
S->mem_used = 0; // The number of integers allocated in the DB
S->nLemmas = 0; // The number of learned clauses -- redundant means learned
S->nConflicts = 0; // Number of conflicts, used to update the scores
S->maxLemmas = INITIAL_MAX_LEMMAS; // Initial maximum number of learnt clauses
//S->fast = S->slow = 1 << 24; // Initialize the fast and slow moving averages
S->fast = S->slow = CLAUSE_LEARN_MAX_MEM; // Initialize the fast and slow moving averages
S->result = -1;
S->DB = db;
S->model = getMemory(S, S->nVars + 1); // Full assignment of the (Boolean) variables (initially set to false)
S->next = getMemory(S, S->nVars + 1); // Next variable in the heuristic order
S->prev = getMemory(S, S->nVars + 1); // Previous variable in the heuristic order
S->buffer = getMemory(S, S->nVars); // A buffer to store a temporary clause
S->reason = getMemory(S, S->nVars + 1); // Array of clauses
S->falseStack = getMemory(S, S->nVars + 1); // Stack of falsified literals -- this pointer is never changed
S->forced = S->falseStack; // Points inside *falseStack at first decision (unforced literal)
S->processed = S->falseStack; // Points inside *falseStack at first unprocessed literal
S->assigned = S->falseStack; // Points inside *falseStack at last unprocessed literal
S->_false = getMemory(S, 2 * S->nVars + 1);
S->_false += S->nVars; // Labels for variables, non-zero means false
S->first = getMemory(S, 2 * S->nVars + 1);
S->first += S->nVars; // Offset of the first watched clause
S->DB[S->mem_used++] = 0; // Make sure there is a 0 before the clauses are loaded.
int i; for (i = 1; i <= S->nVars; i++) { // Initialize the main data structures:
S->prev[i] = i - 1;
S->next[i - 1] = i;
S->model[i] = S->_false[-i] = S->_false[i] = 0;
S->first[i] = S->first[-i] = END; // and first (watch pointers).
S->head = S->nVars; // Initialize the head of the double-linked list
}
int nZeros = S->nClauses, size = 0; // Initialize the number of clauses to read
for (int i = 0; i < nElements; i++) { // While there are elements
int lit = 0;
lit = dev_elements[i];
if (!lit) { // If reaching the end of the clause
int* clause = addClause(S, S->buffer, size, 1); // Then add the clause to data_base
if (!size || ((size == 1) && S->_false[clause[0]])) { // Check for empty clause or conflicting unit
printf("\n + UNSAT + \n");
S->result = UNSAT;
return;
} // If either is found return UNSAT
if ((size == 1) && !S->_false[-clause[0]]) { // Check for a new unit
assign(S, clause, 1);
} // Directly assign new units (forced = 1)
size = 0; --nZeros;
}
else S->buffer[size++] = lit;
}
//printf("\n INITIALIZED \n");
} // Return that no conflict was observed
__host__
static void read_until_new_line(FILE* input) {
int ch;
while ((ch = getc(input)) != '\n')
if (ch == EOF) { printf("parse error: unexpected EOF"); exit(1); }
}
int main(int argc, char** argv) {
if (argc < 6) {
printf("USAGE: ./mcuda <formulas dir> <DB_MAX_MEM> <CLAUSE_LEARN_MAX_MEM> <INITIAL_MAX_LEMMAS> <GPU_COUNT>\n");
return 0;
}
//char* directory = "C://microsat//sat";
char* directory = argv[1];
int num_file = 0;
int nVars = 0;
int nClauses = 0;
Metrics exec_metrics = { 0, 0, 0, 0, 0 };
int db_max_mem = atoi(argv[2]);
int clause_learn_max_mem = atoi(argv[3]);
int initial_max_mem = atoi(argv[4]);
int gpu_count = atoi(argv[5]);
printf("DB_MAX_MEM: %d\n", db_max_mem);
printf("CLAUSE_LEARN_MAX_MEM: %d\n", clause_learn_max_mem);
printf("INITIAL_MAX_LEMMAS: %d\n", initial_max_mem);
printf("GPU_COUNT: %d\n", gpu_count);
deviceInfo();
for (int i = 0; i < gpu_count; i++) {
hipSetDevice(i);
hipDeviceReset();
printf("gpu %d=", i);
showMem();
}
clock_t start, end;
printf(" Start\n");
start = clock();
DIR* dirp;
struct dirent* entry;
dirp = opendir(directory);
while ((entry = readdir(dirp)) != NULL) {
if (entry->d_type == DT_REG) { /* If the entry is a regular file */
num_file++;
}
}
closedir(dirp);
exec_metrics.files_count = num_file;
int per_gpu_files = (num_file + gpu_count - 1) / gpu_count; // integer ceiling; ceil() on an int division would truncate first
// printf(" num file -> %d\n",num_file);
printf("PER_GPU_FILES_COUNT: %d\n", per_gpu_files);
int mem = sizeof(int) * db_max_mem; //TODO: allocate this memory dynamically
solver **h_multi_struct[gpu_count];
int *db[gpu_count];
for (int i = 0; i < gpu_count; i++) {
hipSetDevice(i);
h_multi_struct[i] = (solver**)malloc(per_gpu_files * sizeof(solver*));
gpuErrchk(hipMalloc((void**)&db[i], mem * per_gpu_files));
}
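// Illustrative note (not in the original source): db[i] is one arena of
// per_gpu_files * db_max_mem ints on GPU i; solver k on that GPU uses the slice
// starting at db[i] + k * db_max_mem, so init() must receive the per-solver
// capacity in ints (db_max_mem), not the byte count `mem`.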
if (NULL == (dirp = opendir(directory)))
{
printf("Error : Failed to open input directory \n");
return 1;
}
clock_t start_parse = clock();
int current_gpu_id = 0;
hipSetDevice(current_gpu_id);
int total_processed_files_count = 0;
int processed_per_gpu = 0;
while ((entry = readdir(dirp)))
{
if (processed_per_gpu == per_gpu_files) {
hipSetDevice(++current_gpu_id);
processed_per_gpu = 0;
}
if (!strcmp(entry->d_name, "."))
continue;
if (!strcmp(entry->d_name, ".."))
continue;
char path[100] = ""; //TODO: magic number
strcpy(path, directory);
strcat(path, "//");
strcat(path, entry->d_name);
//printf("file_%d=%s\n", count, entry->d_name);
FILE* input = fopen(path, "r");
if (input == NULL)
{
printf("Error : Failed to open entry file \n");
fclose(input);
return 1;
}
struct solver* dev_s;
gpuErrchk(hipMalloc((void**)&dev_s, sizeof(solver)));
struct stat st;
stat(path, &st);
int size = st.st_size;
//printf("\n size -> %d\n", size);
int* buffer = 0;
buffer = (int*)malloc(size * sizeof(int));
/********* FILE PARSER **************/
int tmp;
while ((tmp = getc(input)) == 'c') read_until_new_line(input);
ungetc(tmp, input);
do {
tmp = fscanf(input, " p cnf %i %i \n", &nVars, &nClauses);
if (tmp > 0 && tmp != EOF) break; tmp = fscanf(input, "%*s\n");
} while (tmp != 2 && tmp != EOF);
int nElements = 0;
do {
int ch = getc(input);
if (ch == '%') break; // some DIMACS files use '%' as an end-of-file marker
if (ch == ' ' || ch == '\n') continue;
if (ch == 'c') { read_until_new_line(input); continue; }
ungetc(ch, input);
int lit = 0;
tmp = fscanf(input, " %i ", &lit);
buffer[nElements] = lit;
//printf("%d ", lit);
nElements++;
} while (tmp != EOF);
nElements--; // TO CHECK
int* elements = 0;
elements = (int*)malloc(nElements * sizeof(int));
for (int i = 0; i < nElements; i++) {
elements[i] = buffer[i];
}
fclose(input);
/********* FILE PARSER **************/
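/* Example of the DIMACS fragment the parser above accepts (illustrative):
     c a comment line
     p cnf 3 2
     1 -2 0
     2 3 0
   Literals are buffered until a terminating 0; a '%' character is treated as
   end of input, as found in some benchmark files. */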
int* dev_file_id;
gpuErrchk(hipMalloc((void**)&dev_file_id, sizeof(int)));
gpuErrchk(hipMemcpy(dev_file_id, &total_processed_files_count, sizeof(int), hipMemcpyHostToDevice));
int* dev_elements;
gpuErrchk(hipMalloc((void**)&dev_elements, nElements * sizeof(int)));
gpuErrchk(hipMemcpy(dev_elements, elements, nElements * sizeof(int), hipMemcpyHostToDevice));
free(buffer);
free(elements);
//hipDeviceSetLimit(hipLimitMallocHeapSize, 128 * 1024 * 1024);
//printf("\n INIT \n");
hipEvent_t d_start_init, d_stop_init;
hipEventCreate(&d_start_init);
hipEventCreate(&d_stop_init);
hipEventRecord(d_start_init, 0);
// init << <1, 1 >> > (dev_s, dev_elements, nElements, nVars, nClauses, &(db[count * mem]), dev_file_id, db_max_mem, clause_learn_max_mem, initial_max_mem);
int* db_offset = db[current_gpu_id] + (db_max_mem * processed_per_gpu);
init << <1, 1 >> > (dev_s, dev_elements, nElements, nVars, nClauses, db_offset, dev_file_id, db_max_mem, clause_learn_max_mem, initial_max_mem); // pass the per-solver capacity in ints, not the byte count
hipEventRecord(d_stop_init, 0);
hipEventSynchronize(d_stop_init);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, d_start_init, d_stop_init); // that's our time!
exec_metrics.init_time += elapsedTime;
// Clean up:
hipEventDestroy(d_start_init);
hipEventDestroy(d_stop_init);
//printf("parsing_file -> %s\n", entry->d_name);
//printf("device_time -> %f s\n", elapsedTime / 1000000);
//exec_metrics.init_time += elapsedTime / 1000000;
gpuErrchk(hipDeviceSynchronize());
//temp
//printf("\n dev_s -> %p\n",dev_s);
solver **current_multi_struct = h_multi_struct[current_gpu_id];
current_multi_struct[processed_per_gpu] = dev_s;
// if(current_gpu_id == 0) {
// h_multi_struct_1[processed_per_gpu] = dev_s;
// } else {
// h_multi_struct_2[processed_per_gpu] = dev_s;
// }
total_processed_files_count++;
processed_per_gpu++;
}
/*********** end init and parse ***********/
exec_metrics.parse_time = (clock() - start_parse);
printf("\n SOLVE \n");
exec_metrics.solve_time = 0;
for (int i = 0; i < gpu_count; i++) {
hipSetDevice(i);
hipEvent_t d_start, d_stop;
hipEventCreate(&d_start);
hipEventCreate(&d_stop);
solver** d_multi_struct;
gpuErrchk(hipMalloc((void**)&d_multi_struct, per_gpu_files * sizeof(solver*)));
gpuErrchk(hipMemcpy(d_multi_struct, h_multi_struct[i], per_gpu_files * sizeof(solver*), hipMemcpyHostToDevice));
hipEventRecord(d_start, 0);
solve << <per_gpu_files, 1 >> > (d_multi_struct);
gpuErrchk(hipDeviceSynchronize());
hipEventRecord(d_stop, 0);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, d_start, d_stop); // that's our time!
// Clean up:
hipEventDestroy(d_start);
hipEventDestroy(d_stop);
exec_metrics.solve_time += elapsedTime;
printf("gpu %d=", i);
showMem();
hipDeviceReset();
}
end = clock();
//printf("\n total time: %f s\n", (float)(end - start) / 1000000);
exec_metrics.tot_time = (float)(end - start);
printf("\n+++ metrics +++\n");
//showMem();
printf("files count: %d\nparse time (s): %f\ncuda init time (s): %f\ncuda solve time (s): %f\ntot time (s): %f\n\n", exec_metrics.files_count, exec_metrics.parse_time / CLOCKS_PER_SEC, exec_metrics.init_time / 1000, exec_metrics.solve_time / 1000, exec_metrics.tot_time / CLOCKS_PER_SEC);
//printf ("c statistics of %s: mem: %i conflicts: %i max_lemmas: %i\n", argv[1], S.mem_used, S.nConflicts, S.maxLemmas);
//printf("\n END \n");
return 0;
}
| 4404151defecef6992aa32bd2508d80ac2badb02.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <sys/stat.h>
#include <dirent.h>
#include <string.h>
#include <stdio.h>
#include <time.h>
#include <string>
//#include <pthread.h>
//#include "thpool.h"
struct solver { // The variables in the struct are described in the allocate procedure
int* DB, nVars, nClauses, mem_used, mem_fixed, mem_max, maxLemmas, nLemmas, * buffer, nConflicts, * model,
* reason, * falseStack, * _false, * first, * forced, * processed, * assigned, * next, * prev, head, res, fast, slow,
result, file_id;
};
typedef struct {
int files_count;
double parse_time;
double init_time;
double solve_time;
double tot_time;
} Metrics;
enum { END = -9, UNSAT = 0, SAT = 1, MARK = 2, IMPLIED = 6 };
void deviceInfo(){
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
void showMem(){
// show memory usage of GPU
size_t free_byte ;
size_t total_byte ;
cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ) ;
if ( cudaSuccess != cuda_status ){
printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) );
exit(1);
}
double free_db = (double)free_byte ;
double total_db = (double)total_byte ;
double used_db = total_db - free_db ;
printf("GPU memory usage: used = %f, free = %f MB, total = %f MB\n",
used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0);
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
//showMem();
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__device__
int* getMemory(struct solver* S, int mem_size) { // Allocate memory of size mem_size
if (S->mem_used + mem_size > S->mem_max) { // In case the code is used within a code base
printf("c out of memory\n");
return 0;
}
int* store = (S->DB + S->mem_used); // Compute a pointer to the new memory location
S->mem_used += mem_size; // Update the size of the used memory
return store;
} // Return the pointer
__device__
void unassign(struct solver* S, int lit) { S->_false[lit] = 0; } // Unassign the literal
__device__
void restart(struct solver* S) { // Perform a restart (i.e., unassign all variables)
while (S->assigned > S->forced) unassign(S, *(--S->assigned)); // Remove all unforced false lits from falseStack
S->processed = S->forced;
} // Reset the processed pointer
__device__
void assign(struct solver* S, int* reason, int forced) { // Make the first literal of the reason true
int lit = reason[0]; // Let lit be the first literal in the reason
S->_false[-lit] = forced ? IMPLIED : 1; // Mark lit as true and IMPLIED if forced
*(S->assigned++) = -lit; // Push it on the assignment stack
S->reason[abs(lit)] = 1 + (int)((reason)-S->DB); // Set the reason clause of lit
S->model[abs(lit)] = (lit > 0);
} // Mark the literal as true in the model
__device__
void addWatch(struct solver* S, int lit, int mem) { // Add a watch pointer to a clause containing lit
S->DB[mem] = S->first[lit]; S->first[lit] = mem;
} // By updating the database and the pointers
__device__
int* addClause(struct solver* S, int* buffer, int size, int irr) { // Adds a clause stored in *in of size size
int i, used = S->mem_used; // Store a pointer to the beginning of the clause
int* clause = getMemory(S, size + 3) + 2; // Allocate memory for the clause in the database
if (size > 1) {
addWatch(S, buffer[0], used); // If the clause is not unit, then add
addWatch(S, buffer[1], used + 1);
} // Two watch pointers to the datastructure
for (i = 0; i < size; i++) clause[i] = buffer[i]; clause[i] = 0; // Copy the clause from the buffer to the database
if (irr) S->mem_fixed = S->mem_used; else S->nLemmas++; // Update the statistics
return clause;
} // Return the pointer to the clause in the database
__device__
void reduceDB(struct solver* S, int k) { // Removes "less useful" lemmas from DB
//printf("Start reduceDB function\n");
while (S->nLemmas > S->maxLemmas) S->maxLemmas += 300; // Allow more lemmas in the future
S->nLemmas = 0; // Reset the number of lemmas
int i; for (i = -S->nVars; i <= S->nVars; i++) { // Loop over the variables
if (i == 0) continue; int* watch = &S->first[i]; // Get the pointer to the first watched clause
while (*watch != END) // As long as there are watched clauses
if (*watch < S->mem_fixed) watch = (S->DB + *watch); // Remove the watch if it points to a lemma
else *watch = S->DB[*watch];
} // Otherwise (meaning an input clause) go to next watch
int old_used = S->mem_used; S->mem_used = S->mem_fixed; // Virtually remove all lemmas
for (i = S->mem_fixed + 2; i < old_used; i += 3) { // While the old memory contains lemmas
int count = 0, head = i; // Get the lemma to which the head is pointing
while (S->DB[i]) {
int lit = S->DB[i++]; // Count the number of literals
if ((lit > 0) == S->model[abs(lit)]) count++;
} // That are satisfied by the current model
if (count < k) addClause(S, S->DB + head, i - head, 0);
}
} // If the latter is smaller than k, add it back
__device__
void bump(struct solver* S, int lit) { // Move the variable to the front of the decision list
//printf("Start bump function\n");
if (S->_false[lit] != IMPLIED) {
S->_false[lit] = MARK; // MARK the literal as involved if not a top-level unit
int var = abs(lit); if (var != S->head) { // In case var is not already the head of the list
S->prev[S->next[var]] = S->prev[var]; // Update the prev link, and
S->next[S->prev[var]] = S->next[var]; // Update the next link, and
S->next[S->head] = var; // Add a next link to the head, and
S->prev[var] = S->head; S->head = var;
}
}
} // Make var the new head
__device__
int implied(struct solver* S, int lit) { // Check if lit(eral) is implied by MARK literals
// printf("Start implied function\n");
if (S->_false[lit] > MARK) return (S->_false[lit] & MARK); // If checked before return old result
if (!S->reason[abs(lit)]) return 0; // In case lit is a decision, it is not implied
int* p = (S->DB + S->reason[abs(lit)] - 1); // Get the reason of lit(eral)
while (*(++p)) // While there are literals in the reason
if ((S->_false[*p] ^ MARK) && !implied(S, *p)) { // Recursively check if non-MARK literals are implied
S->_false[lit] = IMPLIED - 1; return 0;
} // Mark and return not implied (denoted by IMPLIED - 1)
S->_false[lit] = IMPLIED; return 1;
} // Mark and return that the literal is implied
__device__
int* analyze(struct solver* S, int* clause) { // Compute a resolvent from falsified clause
// printf("Start analyze\n");
S->res++; S->nConflicts++; // Bump restarts and update the statistic
while (*clause) bump(S, *(clause++)); // MARK all literals in the falsified clause
while (S->reason[abs(*(--S->assigned))]) { // Loop on variables on falseStack until the last decision
if (S->_false[*S->assigned] == MARK) { // If the tail of the stack is MARK
int* check = S->assigned; // Pointer to check if first-UIP is reached
while (S->_false[*(--check)] != MARK) // Check for a MARK literal before decision
if (!S->reason[abs(*check)]) goto build; // Otherwise it is the first-UIP so break
clause = S->DB + S->reason[abs(*S->assigned)]; // Get the reason and ignore first literal
while (*clause) bump(S, *(clause++));
} // MARK all literals in reason
unassign(S, *S->assigned);
} // Unassign the tail of the stack
build:; int size = 0, lbd = 0, flag = 0; // Build conflict clause; Empty the clause buffer
int* p = S->processed = S->assigned; // Loop from tail to front
while (p >= S->forced) { // Only literals on the stack can be MARKed
if ((S->_false[*p] == MARK) && !implied(S, *p)) { // If MARKed and not implied
S->buffer[size++] = *p; flag = 1;
} // Add literal to conflict clause buffer
if (!S->reason[abs(*p)]) {
lbd += flag; flag = 0; // Increase LBD for a decision with a true flag
if (size == 1) S->processed = p;
} // And update the processed pointer
S->_false[*(p--)] = 1;
} // Reset the MARK flag for all variables on the stack
S->fast -= S->fast >> 5; S->fast += lbd << 15; // Update the fast moving average
S->slow -= S->slow >> 15; S->slow += lbd << 5; // Update the slow moving average
while (S->assigned > S->processed) // Loop over all unprocessed literals
unassign(S, *(S->assigned--)); // Unassign all lits between tail & head
unassign(S, *S->assigned); // Assigned now equal to processed
S->buffer[size] = 0; // Terminate the buffer (and potentially print clause)
return addClause(S, S->buffer, size, 0);
} // Add new conflict clause to redundant DB
__device__
int propagate(struct solver* S) { // Performs unit propagation
int forced = S->reason[abs(*S->processed)]; // Initialize forced flag
while (S->processed < S->assigned) { // While unprocessed false literals
int lit = *(S->processed++); // Get first unprocessed literal
int* watch = &S->first[lit]; // Obtain the first watch pointer
while (*watch != END) { // While there are watched clauses (watched by lit)
int i, unit = 1; // Let's assume that the clause is unit
int* clause = (S->DB + *watch + 1); // Get the clause from DB
if (clause[-2] == 0) clause++; // Set the pointer to the first literal in the clause
if (clause[0] == lit) clause[0] = clause[1]; // Ensure that the other watched literal is in front
for (i = 2; unit && clause[i]; i++) // Scan the non-watched literals
if (!S->_false[clause[i]]) { // When clause[i] is not false, it is either true or unset
clause[1] = clause[i]; clause[i] = lit; // Swap literals
int store = *watch; unit = 0; // Store the old watch
*watch = S->DB[*watch]; // Remove the watch from the list of lit
//printf("add watch\n");
addWatch(S, clause[1], store);
} // Add the watch to the list of clause[1]
if (unit) { // If the clause is indeed unit
//printf("unit\n");
clause[1] = lit; watch = (S->DB + *watch); // Place lit at clause[1] and update next watch
if (S->_false[-clause[0]]) continue; // If the other watched literal is satisfied continue
if (!S->_false[clause[0]]) { // If the other watched literal is falsified,
assign(S, clause, forced);
} // A unit clause is found, and the reason is set
else {
if (forced) { // Found a root level conflict -> UNSAT
//S->result = 0;
return UNSAT;
}
int* lemma = analyze(S, clause); // Analyze the conflict return a conflict clause
if (!lemma[1]) forced = 1; // In case a unit clause is found, set forced flag
assign(S, lemma, forced); break;
}
}
}
} // Assign the conflict clause as a unit
if (forced) S->forced = S->processed; // Set S->forced if applicable
//S->result = 1;
return SAT;
} // Finally, no conflict was found
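// Illustrative note (not part of the original solver): propagate keeps the
// two-watched-literal invariant -- clause[0] and clause[1] are the watched literals,
// and a clause is only revisited when one of its watches becomes false; if no
// non-false replacement literal is found the clause is unit, so clause[0] is assigned
// or, if it is already false, the conflict is analyzed.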
__global__
void solve(struct solver** multi_s) { // Determine satisfiability
struct solver* S = multi_s[blockIdx.x];
int decision = S->head; S->res = 0; // Initialize the solver
for (;;) { // Main solve loop
int old_nLemmas = S->nLemmas; // Store nLemmas to see whether propagate adds lemmas
int res = propagate(S);
if (res == UNSAT) {
printf("file_%d=UNSAT,vars=%i,clauses=%i,mem=%i,conflicts=%i,lemmas=%i\n", S->file_id, S->nVars, S->nClauses, S->mem_used, S->nConflicts, S->maxLemmas);
multi_s[blockIdx.x]->result = UNSAT;
//printf("result -->", S->result);
return;
} // Propagation returns UNSAT for a root level conflict
if (S->nLemmas > old_nLemmas) { // If the last decision caused a conflict
decision = S->head; // Reset the decision heuristic to head
if (S->fast > (S->slow / 100) * 125) { // If fast average is substantially larger than slow average
// printf("c restarting after %i conflicts (%i %i) %i\n", S->res, S->fast, S->slow, S->nLemmas > S->maxLemmas);
S->res = 0; S->fast = (S->slow / 100) * 125; restart(S); // Restart and update the averages
if (S->nLemmas > S->maxLemmas) reduceDB(S, 6);
}
} // Reduce the DB when it contains too many lemmas
while (S->_false[decision] || S->_false[-decision]) { // As long as the temporary decision is assigned
decision = S->prev[decision];
}
//printf("decision: %d \n", decision); // Replace it with the next variable in the decision list
if (decision == 0) {
printf("file_%d=SAT,vars=%i,clauses=%i,mem=%i,conflicts=%i,lemmas=%i\n", S->file_id,S->nVars,S->nClauses,S->mem_used,S->nConflicts,S->maxLemmas);
multi_s[blockIdx.x]->result = SAT;
//printf("result -->", S->result );
return; // If the end of the list is reached, then a solution is found
}
decision = S->model[decision] ? decision : -decision; // Otherwise, assign the decision variable based on the model
S->_false[-decision] = 1; // Assign the decision literal to true (change to IMPLIED-1?)
*(S->assigned++) = -decision; // And push it on the assigned stack
decision = abs(decision); S->reason[decision] = 0;
}
} // Decisions have no reason clauses
__global__
void init(struct solver* S, int* dev_elements, int nElements, int nVars, int nClauses, int* db,
int* file_id, int DB_MAX_MEM, int CLAUSE_LEARN_MAX_MEM, int INITIAL_MAX_LEMMAS) {
S->file_id = *file_id;
S->nVars = nVars;
S->nClauses = nClauses;
//S->mem_max = 100000; // Set the initial maximum memory
S->mem_max = DB_MAX_MEM; // Set the initial maximum memory
S->mem_used = 0; // The number of integers allocated in the DB
S->nLemmas = 0; // The number of learned clauses -- redundant means learned
S->nConflicts = 0; // Number of conflicts, used to update the scores
S->maxLemmas = INITIAL_MAX_LEMMAS; // Initial maximum number of learnt clauses
//S->fast = S->slow = 1 << 24; // Initialize the fast and slow moving averages
S->fast = S->slow = CLAUSE_LEARN_MAX_MEM; // Initialize the fast and slow moving averages
S->result = -1;
S->DB = db;
S->model = getMemory(S, S->nVars + 1); // Full assignment of the (Boolean) variables (initially set to false)
S->next = getMemory(S, S->nVars + 1); // Next variable in the heuristic order
S->prev = getMemory(S, S->nVars + 1); // Previous variable in the heuristic order
S->buffer = getMemory(S, S->nVars); // A buffer to store a temporary clause
S->reason = getMemory(S, S->nVars + 1); // Array of clauses
S->falseStack = getMemory(S, S->nVars + 1); // Stack of falsified literals -- this pointer is never changed
S->forced = S->falseStack; // Points inside *falseStack at first decision (unforced literal)
S->processed = S->falseStack; // Points inside *falseStack at first unprocessed literal
S->assigned = S->falseStack; // Points inside *falseStack at last unprocessed literal
S->_false = getMemory(S, 2 * S->nVars + 1);
S->_false += S->nVars; // Labels for variables, non-zero means false
S->first = getMemory(S, 2 * S->nVars + 1);
S->first += S->nVars; // Offset of the first watched clause
S->DB[S->mem_used++] = 0; // Make sure there is a 0 before the clauses are loaded.
int i; for (i = 1; i <= S->nVars; i++) { // Initialize the main data structures:
S->prev[i] = i - 1;
S->next[i - 1] = i;
S->model[i] = S->_false[-i] = S->_false[i] = 0;
S->first[i] = S->first[-i] = END; // and first (watch pointers).
S->head = S->nVars; // Initialize the head of the double-linked list
}
int nZeros = S->nClauses, size = 0; // Initialize the number of clauses to read
for (int i = 0; i < nElements; i++) { // While there are elements
int lit = 0;
lit = dev_elements[i];
if (!lit) { // If reaching the end of the clause
int* clause = addClause(S, S->buffer, size, 1); // Then add the clause to data_base
if (!size || ((size == 1) && S->_false[clause[0]])) { // Check for empty clause or conflicting unit
printf("\n + UNSAT + \n");
S->result = UNSAT;
return;
} // If either is found return UNSAT
if ((size == 1) && !S->_false[-clause[0]]) { // Check for a new unit
assign(S, clause, 1);
} // Directly assign new units (forced = 1)
size = 0; --nZeros;
}
else S->buffer[size++] = lit;
}
//printf("\n INITIALIZED \n");
} // Return that no conflict was observed
__host__
static void read_until_new_line(FILE* input) {
int ch;
while ((ch = getc(input)) != '\n')
if (ch == EOF) { printf("parse error: unexpected EOF"); exit(1); }
}
int main(int argc, char** argv) {
if (argc < 6) {
printf("USAGE: ./mcuda <formulas dir> <DB_MAX_MEM> <CLAUSE_LEARN_MAX_MEM> <INITIAL_MAX_LEMMAS> <GPU_COUNT>\n");
return 0;
}
//char* directory = "C://microsat//sat";
char* directory = argv[1];
int num_file = 0;
int nVars = 0;
int nClauses = 0;
Metrics exec_metrics = { 0, 0, 0, 0, 0 };
int db_max_mem = atoi(argv[2]);
int clause_learn_max_mem = atoi(argv[3]);
int initial_max_mem = atoi(argv[4]);
int gpu_count = atoi(argv[5]);
printf("DB_MAX_MEM: %d\n", db_max_mem);
printf("CLAUSE_LEARN_MAX_MEM: %d\n", clause_learn_max_mem);
printf("INITIAL_MAX_LEMMAS: %d\n", initial_max_mem);
printf("GPU_COUNT: %d\n", gpu_count);
deviceInfo();
for (int i = 0; i < gpu_count; i++) {
cudaSetDevice(i);
cudaDeviceReset();
printf("gpu %d=", i);
showMem();
}
clock_t start, end;
printf(" Start\n");
start = clock();
DIR* dirp;
struct dirent* entry;
dirp = opendir(directory);
while ((entry = readdir(dirp)) != NULL) {
if (entry->d_type == DT_REG) { /* If the entry is a regular file */
num_file++;
}
}
closedir(dirp);
exec_metrics.files_count = num_file;
int per_gpu_files = (num_file + gpu_count - 1) / gpu_count; // ceiling division; ceil(num_file / gpu_count) truncates before ceil is applied
// printf(" num file -> %d\n",num_file);
printf("PER_GPU_FILES_COUNT: %d\n", per_gpu_files);
int mem = sizeof(int) * db_max_mem; //TODO: allocate this memory dynamically
solver **h_multi_struct[gpu_count];
int *db[gpu_count];
for (int i = 0; i < gpu_count; i++) {
cudaSetDevice(i);
h_multi_struct[i] = (solver**)malloc(per_gpu_files * sizeof(solver*));
gpuErrchk(cudaMalloc((void**)&db[i], mem * per_gpu_files));
}
if (NULL == (dirp = opendir(directory)))
{
printf("Error : Failed to open input directory \n");
return 1;
}
clock_t start_parse = clock();
int current_gpu_id = 0;
cudaSetDevice(current_gpu_id);
int total_processed_files_count = 0;
int processed_per_gpu = 0;
while ((entry = readdir(dirp)))
{
if (processed_per_gpu == per_gpu_files) {
cudaSetDevice(++current_gpu_id);
processed_per_gpu = 0;
}
if (!strcmp(entry->d_name, "."))
continue;
if (!strcmp(entry->d_name, ".."))
continue;
char path[100] = ""; //TODO: magic number
strcpy(path, directory);
strcat(path, "//");
strcat(path, entry->d_name);
//printf("file_%d=%s\n", count, entry->d_name);
FILE* input = fopen(path, "r");
if (input == NULL)
{
printf("Error : Failed to open entry file \n");
return 1;
}
struct solver* dev_s;
gpuErrchk(cudaMalloc((void**)&dev_s, sizeof(solver)));
struct stat st;
stat(path, &st);
int size = st.st_size;
//printf("\n size -> %d\n", size);
int* buffer = 0;
buffer = (int*)malloc(size * sizeof(int));
/********* FILE PARSER **************/
int tmp;
while ((tmp = getc(input)) == 'c') read_until_new_line(input);
ungetc(tmp, input);
do {
tmp = fscanf(input, " p cnf %i %i \n", &nVars, &nClauses);
if (tmp > 0 && tmp != EOF) break; tmp = fscanf(input, "%*s\n");
} while (tmp != 2 && tmp != EOF);
int nElements = 0;
do {
int ch = getc(input);
if (ch == '%') break; // some DIMACS files use % as an end-of-file marker
if (ch == ' ' || ch == '\n') continue;
if (ch == 'c') { read_until_new_line(input); continue; }
ungetc(ch, input);
int lit = 0;
tmp = fscanf(input, " %i ", &lit);
buffer[nElements] = lit;
//printf("%d ", lit);
nElements++;
} while (tmp != EOF);
nElements--; // TO CHECK
int* elements = 0;
elements = (int*)malloc(nElements * sizeof(int));
for (int i = 0; i < nElements; i++) {
elements[i] = buffer[i];
}
fclose(input);
/********* FILE PARSER **************/
int* dev_file_id;
gpuErrchk(cudaMalloc((void**)&dev_file_id, sizeof(int)));
gpuErrchk(cudaMemcpy(dev_file_id, &total_processed_files_count, sizeof(int), cudaMemcpyHostToDevice));
int* dev_elements;
gpuErrchk(cudaMalloc((void**)&dev_elements, nElements * sizeof(int)));
gpuErrchk(cudaMemcpy(dev_elements, elements, nElements * sizeof(int), cudaMemcpyHostToDevice));
free(buffer);
free(elements);
//cudaDeviceSetLimit(cudaLimitMallocHeapSize, 128 * 1024 * 1024);
//printf("\n INIT \n");
cudaEvent_t d_start_init, d_stop_init;
cudaEventCreate(&d_start_init);
cudaEventCreate(&d_stop_init);
cudaEventRecord(d_start_init, 0);
// init << <1, 1 >> > (dev_s, dev_elements, nElements, nVars, nClauses, &(db[count * mem]), dev_file_id, db_max_mem, clause_learn_max_mem, initial_max_mem);
int* db_offset = db[current_gpu_id] + (db_max_mem * processed_per_gpu);
init<<<1, 1>>>(dev_s, dev_elements, nElements, nVars, nClauses, db_offset, dev_file_id, db_max_mem, clause_learn_max_mem, initial_max_mem); // pass the per-solver DB size in ints (db_max_mem); mem is a byte count and would overstate the limit
cudaEventRecord(d_stop_init, 0);
cudaEventSynchronize(d_stop_init);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, d_start_init, d_stop_init); // that's our time!
exec_metrics.init_time += elapsedTime;
// Clean up:
cudaEventDestroy(d_start_init);
cudaEventDestroy(d_stop_init);
//printf("parsing_file -> %s\n", entry->d_name);
//printf("device_time -> %f s\n", elapsedTime / 1000000);
//exec_metrics.init_time += elapsedTime / 1000000;
gpuErrchk(cudaDeviceSynchronize());
//temp
//printf("\n dev_s -> %p\n",dev_s);
solver **current_multi_struct = h_multi_struct[current_gpu_id];
current_multi_struct[processed_per_gpu] = dev_s;
// if(current_gpu_id == 0) {
// h_multi_struct_1[processed_per_gpu] = dev_s;
// } else {
// h_multi_struct_2[processed_per_gpu] = dev_s;
// }
total_processed_files_count++;
processed_per_gpu++;
}
/*********** end init and parse ***********/
exec_metrics.parse_time = (clock() - start_parse);
printf("\n SOLVE \n");
exec_metrics.solve_time = 0;
for (int i = 0; i < gpu_count; i++) {
cudaSetDevice(i);
cudaEvent_t d_start, d_stop;
cudaEventCreate(&d_start);
cudaEventCreate(&d_stop);
solver** d_multi_struct;
gpuErrchk(cudaMalloc((void**)&d_multi_struct, per_gpu_files * sizeof(solver*)));
gpuErrchk(cudaMemcpy(d_multi_struct, h_multi_struct[i], per_gpu_files * sizeof(solver*), cudaMemcpyHostToDevice));
cudaEventRecord(d_start, 0);
solve<<<per_gpu_files, 1>>>(d_multi_struct);
gpuErrchk(cudaDeviceSynchronize());
cudaEventRecord(d_stop, 0);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, d_start, d_stop); // that's our time!
// Clean up:
cudaEventDestroy(d_start);
cudaEventDestroy(d_stop);
exec_metrics.solve_time += elapsedTime;
printf("gpu %d=", i);
showMem();
cudaDeviceReset();
}
end = clock();
//printf("\n total time: %f s\n", (float)(end - start) / 1000000);
exec_metrics.tot_time = (float)(end - start);
printf("\n+++ metrics +++\n");
//showMem();
printf("files count: %d\nparse time (s): %f\ncuda init time (s): %f\ncuda solve time (s): %f\ntot time (s): %f\n\n", exec_metrics.files_count, exec_metrics.parse_time / CLOCKS_PER_SEC, exec_metrics.init_time / 1000, exec_metrics.solve_time / 1000, exec_metrics.tot_time / CLOCKS_PER_SEC);
//printf ("c statistics of %s: mem: %i conflicts: %i max_lemmas: %i\n", argv[1], S.mem_used, S.nConflicts, S.maxLemmas);
//printf("\n END \n");
return 0;
}
|
b0b74b74ab0de2bf3089eef13d03a9a92c289052.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
int main() {
float* d_a;
// my array initialized
initializeArray1D(d_a, ARR_LG, 420);
#if ENABLE_GPU
// GPU Timing variables
hipEvent_t start_g_data, stop_g_data;
hipEvent_t start_g_kernel, stop_g_kernel;
float elapsed_g_data, elapsed_g_kernel;
// <set stuff up not timed>
// Create the gpu data transfer event timer
hipEventCreate(&start_g_data);
hipEventCreate(&stop_g_data);
// Record event on the default stream
hipEventRecord(start_g_data, 0);
// Transfer the arrays to the GPU memory
CUDA_SAFE_CALL(hipMemcpy(d_a, h_a, allocSize, hipMemcpyHostToDevice));
// Create the gpu kernel event timer
hipEventCreate(&start_g_kernel);
hipEventCreate(&stop_g_kernel);
// Record event on the default stream
hipEventRecord(start_g_kernel, 0);
// Runs Kernels
hipLaunchKernelGGL(( kernel_mmm), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_p);
// < do serial stuff here >
// Check for errors during launch
CUDA_SAFE_CALL(hipPeekAtLastError());
// Stop and destroy the gpu kernel timer
hipEventRecord(stop_g_kernel,0);
hipEventSynchronize(stop_g_kernel);
hipEventElapsedTime(&elapsed_g_kernel, start_g_kernel, stop_g_kernel);
hipEventDestroy(start_g_kernel);
hipEventDestroy(stop_g_kernel);
// Transfer the results back to the host
CUDA_SAFE_CALL(hipMemcpy(h_p, d_p, allocSize, hipMemcpyDeviceToHost));
// Stop and destroy the gpu data transfer timer
hipEventRecord(stop_g_data,0);
hipEventSynchronize(stop_g_data);
hipEventElapsedTime(&elapsed_g_data, start_g_data, stop_g_data);
hipEventDestroy(start_g_data);
hipEventDestroy(stop_g_data);
// < do output stuff not timed >
CUDA_SAFE_CALL(hipFree(d_a));
printf("Output row: %d\n", out);
// Print time
printf("\nGPU kernel time: %f (msec)\n", elapsed_g_kernel);
printf("\nGPU data transfer time: %f (msec)\n", elapsed_g_data);
#if ENABLE_SERIAL
// CPU Timing variables
time_t start_c, stop_c;
float elapsed_c;
// < set up stuff not timed >
// Get time start
start_c = time(NULL);
serial_mcmc(d_a, sz, out);
// < do serial stuff here >
// Get time end
stop_c = time(NULL);
elapsed_c = difftime(stop_c,start_c);
// < do output stuff not timed >
free(s_p);
printf("Output row: %d\n", out);
// Print time
printf("\nCPU time: %f (sec)\n", elapsed_c);
#endif
return 0;
} | b0b74b74ab0de2bf3089eef13d03a9a92c289052.cu | #include <time.h>
#include <stdio.h>
#include <stdlib.h>
int main() {
float* d_a;
// my array initialized
initializeArray1D(d_a, ARR_LG, 420);
#if ENABLE_GPU
// GPU Timing variables
cudaEvent_t start_g_data, stop_g_data;
cudaEvent_t start_g_kernel, stop_g_kernel;
float elapsed_g_data, elapsed_g_kernel;
// <set stuff up not timed>
// Create the gpu data transfer event timer
cudaEventCreate(&start_g_data);
cudaEventCreate(&stop_g_data);
// Record event on the default stream
cudaEventRecord(start_g_data, 0);
// Transfer the arrays to the GPU memory
CUDA_SAFE_CALL(cudaMemcpy(d_a, h_a, allocSize, cudaMemcpyHostToDevice));
// Create the gpu kernel event timer
cudaEventCreate(&start_g_kernel);
cudaEventCreate(&stop_g_kernel);
// Record event on the default stream
cudaEventRecord(start_g_kernel, 0);
// Runs Kernels
kernel_mmm<<<dimGrid, dimBlock>>>(d_a, d_b, d_p);
// < do serial stuff here >
// Check for errors during launch
CUDA_SAFE_CALL(cudaPeekAtLastError());
// Stop and destroy the gpu kernel timer
cudaEventRecord(stop_g_kernel,0);
cudaEventSynchronize(stop_g_kernel);
cudaEventElapsedTime(&elapsed_g_kernel, start_g_kernel, stop_g_kernel);
cudaEventDestroy(start_g_kernel);
cudaEventDestroy(stop_g_kernel);
// Transfer the results back to the host
CUDA_SAFE_CALL(cudaMemcpy(h_p, d_p, allocSize, cudaMemcpyDeviceToHost));
// Stop and destroy the gpu data transfer timer
cudaEventRecord(stop_g_data,0);
cudaEventSynchronize(stop_g_data);
cudaEventElapsedTime(&elapsed_g_data, start_g_data, stop_g_data);
cudaEventDestroy(start_g_data);
cudaEventDestroy(stop_g_data);
// < do output stuff not timed >
CUDA_SAFE_CALL(cudaFree(d_a));
printf("Output row: %d\n", out);
// Print time
printf("\nGPU kernel time: %f (msec)\n", elapsed_g_kernel);
printf("\nGPU data transfer time: %f (msec)\n", elapsed_g_data);
#if ENABLE_SERIAL
// CPU Timing variables
time_t start_c, stop_c;
float elapsed_c;
// < set up stuff not timed >
// Get time start
start_c = time(NULL);
serial_mcmc(d_a, sz, out);
// < do serial stuff here >
// Get time end
stop_c = time(NULL);
elapsed_c = difftime(stop_c,start_c);
// < do output stuff not timed >
free(s_p);
printf("Output row: %d\n", out);
// Print time
printf("\nCPU time: %f (sec)\n", elapsed_c);
#endif
return 0;
} |
313b461aee4568f34c75f9cbef149fd928a9f57c.hip | // !!! This is a file automatically generated by hipify!!!
#include <GL/glut.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
//
// Constantes para OpenGL
//
#define KEY_ESC 27
#define ANCHO 1920
#define ALTO 1080
#define false 0
#define true 1
//
// Constantes para Algoritmo de gravitacion
//
#define PI (3.141592653589793)
#define G 6.673e-11
// ===============
// ===== CPU =====
// ===============
//
// Estructuras y variables para Algoritmo de gravitacion
//
const int CUDA_BLK = 64; // Default CUDA thread-block size
float toroide_alfa;
float toroide_theta;
float toroide_incremento;
float toroide_lado;
float toroide_r;
float toroide_R;
int delta_tiempo = 1.0f; //Intervalo de tiempo, longitud de un paso
int pasos;
int N;
//variables nuestras CPU//
float * masas;
float * cPositionX;
float * cPositionY;
float * cPositionZ;
float * cVelocityX;
float * cVelocityY;
float * cVelocityZ;
float * fuerza_totalX;
float * fuerza_totalY;
float * fuerza_totalZ;
//variables nuestras GPU//
float * gpu_masas;
float * gpu_cPositionX;
float * gpu_cPositionY;
float * gpu_cPositionZ;
float * gpu_cVelocityX;
float * gpu_cVelocityY;
float * gpu_cVelocityZ;
float * gpu_fuerza_totalX;
float * gpu_fuerza_totalY;
float * gpu_fuerza_totalZ;
double cColorR = (double) rand() / (RAND_MAX + 1.0);
double cColorG = (double) rand() / (RAND_MAX + 1.0);
double cColorB = (double) rand() / (RAND_MAX + 1.0);
//terminan las variables nuestras.//
//
// Funciones para Algoritmo de gravitacion
//
void calcularFuerzas(int N, int dt) {
int cuerpo1, cuerpo2;
float dif_X, dif_Y, dif_Z;
float distancia;
float F;
for (cuerpo1 = 0; cuerpo1 < N - 1; cuerpo1++) {
for (cuerpo2 = cuerpo1 + 1; cuerpo2 < N; cuerpo2++) {
if ((cPositionX[cuerpo1] == cPositionX[cuerpo2]) && (cPositionY[cuerpo1] == cPositionY[cuerpo2]) && (cPositionZ[cuerpo1] == cPositionZ[cuerpo2]))
continue;
dif_X = cPositionX[cuerpo2] - cPositionX[cuerpo1];
dif_Y = cPositionY[cuerpo2] - cPositionY[cuerpo1];
dif_Z = cPositionZ[cuerpo2] - cPositionZ[cuerpo1];
distancia = sqrt(dif_X * dif_X + dif_Y * dif_Y + dif_Z * dif_Z);
F = (G * masas[cuerpo1] * masas[cuerpo2]) / (distancia * distancia);
dif_X *= F;
dif_Y *= F;
dif_Z *= F;
fuerza_totalX[cuerpo1] += dif_X;
fuerza_totalY[cuerpo1] += dif_Y;
fuerza_totalZ[cuerpo1] += dif_Z;
fuerza_totalX[cuerpo2] -= dif_X;
fuerza_totalY[cuerpo2] -= dif_Y;
fuerza_totalZ[cuerpo2] -= dif_Z;
}
}
}
void moverCuerpos(int N, int dt) {
int cuerpo;
for (cuerpo = 0; cuerpo < N; cuerpo++) {
fuerza_totalX[cuerpo] *= 1 / masas[cuerpo];
fuerza_totalY[cuerpo] *= 1 / masas[cuerpo];
cVelocityX[cuerpo] += fuerza_totalX[cuerpo] * dt;
cVelocityY[cuerpo] += fuerza_totalY[cuerpo] * dt;
cPositionX[cuerpo] += cVelocityX[cuerpo] * dt;
cPositionY[cuerpo] += cVelocityY[cuerpo] * dt;
fuerza_totalX[cuerpo] = 0.0;
fuerza_totalY[cuerpo] = 0.0;
fuerza_totalZ[cuerpo] = 0.0;
}
}
void gravitacionCPU(int N, int dt) {
calcularFuerzas(N, dt);
moverCuerpos(N, dt);
}
void inicializarEstrella(int i, double n) {
//todos van a tener la misma masa//
masas[i] = 0.001 * 8;
if ((toroide_alfa + toroide_incremento) >= 2 * M_PI) {
toroide_alfa = 0;
toroide_theta += toroide_incremento;
} else {
toroide_alfa += toroide_incremento;
}
cPositionX[i] = (toroide_R + toroide_r * cos(toroide_alfa)) * cos(toroide_theta);
cPositionY[i] = (toroide_R + toroide_r * cos(toroide_alfa)) * sin(toroide_theta);
cPositionZ[i] = toroide_r * sin(toroide_alfa);
cVelocityX[i] = 0.0;
cVelocityY[i] = 0.0;
cVelocityZ[i] = 0.0;
}
void inicializarCuerpos(int N) {
int cuerpo;
double n = N;
toroide_alfa = 0.0;
toroide_theta = 0.0;
toroide_lado = sqrt(N);
toroide_incremento = 2 * M_PI / toroide_lado;
toroide_r = 1.0;
toroide_R = 2 * toroide_r;
srand(time(NULL));
for (cuerpo = 0; cuerpo < N; cuerpo++) {
fuerza_totalX[cuerpo] = 0.0;
fuerza_totalY[cuerpo] = 0.0;
fuerza_totalZ[cuerpo] = 0.0;
inicializarEstrella(cuerpo, n);
}
masas[0] = 2.0e2;
cPositionX[0] = 0.0;
cPositionY[0] = 0.0;
cPositionZ[0] = 0.0;
cVelocityX[0] = -0.000001;
cVelocityY[0] = -0.000001;
cVelocityZ[0] = 0.0;
masas[1] = 1.0e1;
cPositionX[1] = -1.0;
cPositionY[1] = 0.0;
cPositionZ[1] = 0.0;
cVelocityX[1] = 0.0;
cVelocityY[1] = 0.0001;
cVelocityZ[1] = 0.0;
}
void finalizar(void) {
free(masas);
free(cPositionX);
free(cPositionY);
free(cPositionZ);
free(cVelocityX);
free(cVelocityY);
free(cVelocityZ);
free(fuerza_totalX);
free(fuerza_totalY);
free(fuerza_totalZ);
}
// ===============
// ===== GPU =====
// ===============
__global__ void kernelCalcularFuerzas(int N, int dt, float * gpu_masas, float * gpu_cPositionX, float * gpu_cPositionY, float * gpu_cPositionZ, float * gpu_cVelocityX, float * gpu_cVelocityY, float * gpu_cVelocityZ, float * gpu_fuerza_totalX, float * gpu_fuerza_totalY, float * gpu_fuerza_totalZ) {
extern __shared__ float shared_size[];
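// Layout note: shared_size is carved into 10 consecutive arrays of CUDA_BLK floats each
// (forces, positions, velocities, masses), so the launch must pass
// 10 * CUDA_BLK * sizeof(float) bytes of dynamic shared memory.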
//TODO: figure out properly how to jump to the next slot in shared memory
float * sh_fuerza_totalX = & shared_size[0];
float * sh_fuerza_totalY = & shared_size[1 * CUDA_BLK];
float * sh_fuerza_totalZ = & shared_size[2 * CUDA_BLK];
float * sh_cPositionX = & shared_size[3 * CUDA_BLK];
float * sh_cPositionY = & shared_size[4 * CUDA_BLK];
float * sh_cPositionZ = & shared_size[5 * CUDA_BLK];
float * sh_cVelocityX = & shared_size[6 * CUDA_BLK];
float * sh_cVelocityY = & shared_size[7 * CUDA_BLK];
float * sh_cVelocityZ = & shared_size[8 * CUDA_BLK];
float * sh_masas = & shared_size[9 * CUDA_BLK];
//WITH THIS APPROACH IT IS BETTER TO HAVE MORE THREADS PER BLOCK THAN BLOCKS
//THE MASSES COULD GO IN CONSTANT MEMORY; that would require knowing them at launch time
//COMPUTE THIS THREAD'S GLOBAL INDEX
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i, j;
//Iterate over all blocks
for (j = 0; j < N/CUDA_BLK; j++){
//Coalesced accesses to global memory to bring the data into shared memory
for (i = 0; i < CUDA_BLK; i++) {
//FETCH THE MASS AND POSITION DATA FOR MY INDEX; THE FORCE VECTOR IS INITIALIZED TO ZERO BELOW
sh_masas[threadIdx.x] = gpu_masas[idx];
sh_cPositionX[threadIdx.x] = gpu_cPositionX[idx];
sh_cPositionY[threadIdx.x] = gpu_cPositionY[idx];
sh_cPositionZ[threadIdx.x] = gpu_cPositionZ[idx];
sh_cVelocityX[threadIdx.x] = gpu_cVelocityX[idx];
sh_cVelocityY[threadIdx.x] = gpu_cVelocityY[idx];
sh_cVelocityZ[threadIdx.x] = gpu_cVelocityZ[idx];
}
__syncthreads();
//Synchronize to make sure every thread has loaded the data to be processed
for (i = 0; i < CUDA_BLK; i++) {
//Forces initialized to zero
sh_fuerza_totalX[threadIdx.x] = 0.0f;
sh_fuerza_totalY[threadIdx.x] = 0.0f;
sh_fuerza_totalZ[threadIdx.x] = 0.0f;
float dif_X = sh_cPositionX[threadIdx.x] - sh_cPositionX[i];
float dif_Y = sh_cPositionY[threadIdx.x] - sh_cPositionY[i];
float dif_Z = sh_cPositionZ[threadIdx.x] - sh_cPositionZ[i];
float distancia = sqrt(dif_X * dif_X + dif_Y * dif_Y + dif_Z * dif_Z);
float F = (G * sh_masas[i] * sh_masas[threadIdx.x]) / (distancia * distancia);
dif_X *= F;
dif_Y *= F;
dif_Z *= F;
sh_fuerza_totalX[threadIdx.x] += dif_X;
sh_fuerza_totalY[threadIdx.x] += dif_Y;
sh_fuerza_totalZ[threadIdx.x] += dif_Z;
}
__syncthreads();
//Synchronize to make sure every thread has finished processing
//Coalesced accesses to global memory to write back the results
for (i = 0; i < CUDA_BLK; i++) {
//STORE ALL THE DATA HELD IN SHARED MEMORY: POSITION, FORCE, VELOCITY.
gpu_fuerza_totalX[idx] = sh_fuerza_totalX[threadIdx.x];
gpu_fuerza_totalY[idx] = sh_fuerza_totalY[threadIdx.x];
gpu_fuerza_totalZ[idx] = sh_fuerza_totalZ[threadIdx.x];
gpu_cPositionX[idx] = sh_cPositionX[threadIdx.x];
gpu_cPositionY[idx] = sh_cPositionY[threadIdx.x];
gpu_cPositionZ[idx] = sh_cPositionZ[threadIdx.x];
gpu_cVelocityX[idx] = sh_cVelocityX[threadIdx.x];
gpu_cVelocityY[idx] = sh_cVelocityY[threadIdx.x];
gpu_cVelocityZ[idx] = sh_cVelocityZ[threadIdx.x];
}
} //End of the iteration over blocks
}
__global__ void kernelMoverCuerpos(int N, int dt, float * gpu_masas, float * gpu_cPositionX, float * gpu_cPositionY, float * gpu_cPositionZ, float * gpu_cVelocityX, float * gpu_cVelocityY, float * gpu_cVelocityZ, float * gpu_fuerza_totalX, float * gpu_fuerza_totalY, float * gpu_fuerza_totalZ) {
//DEFINO EL INDICE DE MI HILO
/*
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int cuerpo;
for(cuerpo = 0; cuerpo < N; cuerpo++){
gpu_fuerza_totalX[cuerpo] *= 1/gpu_masas[cuerpo];
gpu_fuerza_totalY[cuerpo] *= 1/gpu_masas[cuerpo];
gpu_cVelocityX[i] += dt * gpu_fuerza_totalX[i];
gpu_cVelocityY[i] += dt * gpu_fuerza_totalY[i];
gpu_cVelocityZ[i] += dt * gpu_fuerza_totalZ[i];
gpu_cPositionX[cuerpo] += gpu_cVelocityX[cuerpo] *dt;
gpu_cPositionY[cuerpo] += gpu_cVelocityY[cuerpo] *dt;
gpu_fuerza_totalX[cuerpo] = 0.0;
gpu_fuerza_totalY[cuerpo] = 0.0;
gpu_fuerza_totalZ[cuerpo] = 0.0;
}
*/
}
void gravitacionGPU(int N, int dt) {
//TENEMOS QUE REESCRIBIR ESTAS DOS FUNCIONES PARA QUE ANDEN CON EL GPU
// Bloque unidimensional de hilos (*blk_size* hilos)
dim3 dimBlock(CUDA_BLK);
// Grid unidimensional (*ceil(n/blk_size)* bloques)
dim3 dimGrid((N + dimBlock.x - 1) / dimBlock.x);
kernelCalcularFuerzas<<<dimGrid, dimBlock, 10 * CUDA_BLK * sizeof(float)>>>(N, dt, gpu_masas, gpu_cPositionX, gpu_cPositionY, gpu_cPositionZ, gpu_cVelocityX, gpu_cVelocityY, gpu_cVelocityZ, gpu_fuerza_totalX, gpu_fuerza_totalY, gpu_fuerza_totalZ); // the kernel uses extern __shared__ for 10 arrays of CUDA_BLK floats, so the size must be supplied here
hipDeviceSynchronize();
//Al kernel de mover cuerpos, le paso todos los parametros de arreglos de la GPU?
kernelMoverCuerpos<<<dimGrid, dimBlock>>>(N, dt, gpu_masas, gpu_cPositionX, gpu_cPositionY, gpu_cPositionZ, gpu_cVelocityX, gpu_cVelocityY, gpu_cVelocityZ, gpu_fuerza_totalX, gpu_fuerza_totalY, gpu_fuerza_totalZ);
hipDeviceSynchronize();
}
// ==================
// ===== OpenGL =====
// ==================
//
// Variables OpenGL
//
double alfa = 0.0;
// Para angulo de rotacion y direccion de la camara
float angle = 0.0;
float camAngleX = 0;
float camAngleY = 0;
float distancia = 10;
int ejes = 1;
// Vector actual que representa la direccion de la camara
float lx = 0.0f, lz = -1.0f;
// posicion XZ de la camara
float x = 0.0f, z = 5.0f;
int oldX = 0, oldY = 0;
int rotate = false;
//
// Funciones OpenGL
//
//Funcion que se llama cada vez que se quiere dibujar nuevamente en la pantalla
//Se llama cada vez que se produce el evento render
void GL_camara() {
float camX, camY, camZ;
//Camara mirando al origen (pickObjX,pickObjY,pickObjZ) = (0,0,0)
float pickObjX = 0.0;
float pickObjY = 0.0;
float pickObjZ = 0.0;
camX = distancia * sin(camAngleX);
camY = distancia * sin(camAngleY);
camZ = distancia * cos(camAngleY) * cos(camAngleX);
//Ubicar la camara
gluLookAt(camX, camY, camZ, // Posicion de la camara
pickObjX, pickObjY, pickObjZ, // Mirando al punto
0.0, 1.0, 0.0); // Up vector
}
void GL_dibujarCuerpos(void) {
int i;
for (i = 0; i < N; i++) {
glPushMatrix();
glTranslatef(cPositionX[i], cPositionY[i], cPositionZ[i]);
//reemplazar por los valores random de los colores//
glColor3f((double) rand() / (RAND_MAX + 1.0), (double) rand() / (RAND_MAX + 1.0), (double) rand() / (RAND_MAX + 1.0));
glutSolidSphere(0.02, 20, 20);
glPopMatrix();
}
//ACA!!! se Llama a la funcion que calcula las fuerzas nuevamente
//gravitacion GPU//
gravitacionGPU(N, delta_tiempo);
//TRAERME LOS DATOS DE LA GPU//
hipMemcpy(cPositionX, gpu_cPositionX, N * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(cPositionY, gpu_cPositionY, N * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(cPositionZ, gpu_cPositionZ, N * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(cVelocityX, gpu_cVelocityX, N * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(cVelocityY, gpu_cVelocityY, N * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(cVelocityZ, gpu_cVelocityZ, N * sizeof(float), hipMemcpyDeviceToHost);
//gravitacionCPU(N,delta_tiempo);
}
void GL_dibujar(void) {
// Borra el color y los buffers de profundidad
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Reiniciar la matriz de transformaciones
glLoadIdentity();
//ubica la camara
GL_camara();
//Dibuja los ejes de coordenadas (si estan habilitados)
if (ejes) {
glBegin(GL_LINES);
glColor3f(1.0, 0.0, 0.0);
glVertex3d(0, 0, 0);
glVertex3d(5.0, 0.0, 0.0);
glColor3f(0.0, 1.0, 0.0);
glVertex3d(0, 0, 0);
glVertex3d(0.0, 5.0, 0.0);
glColor3f(0.0, 0.0, 1.0);
glVertex3d(0, 0, 0);
glVertex3d(0, 0, 5.0);
glEnd();
}
// Dibuja
glPushMatrix();
GL_dibujarCuerpos();
glPopMatrix();
glutSwapBuffers();
}
void GL_cambioDeDimensionDeVentana(int w, int h) {
// Evita que se divida por cero cuando la ventana es muy chica
if (h == 0) h = 1;
float ratio = w * 1.0 / h;
// Usa la matriz de proyecion
glMatrixMode(GL_PROJECTION);
// Reset matriz
glLoadIdentity();
// Configura el viewport para la ventana completa
glViewport(0, 0, w, h);
// Configura la perspectiva correcta
gluPerspective(45.0f, ratio, 0.1f, 100.0f);
// Modelview
glMatrixMode(GL_MODELVIEW);
}
//Funcion de inicializacion
void GL_inicio(void) {
glClearColor(0.0, 0.0, 0.0, 0.0);
glOrtho(-10, 10, -10, 10, -10, 10);
}
void GL_teclado(unsigned char key, int x, int y) {
double denominador = 50.0;
double grados = PI / denominador;
switch (key) {
case 'a':
if (alfa + grados >= 2 * PI)
alfa = (alfa + grados) - 2 * PI;
else
alfa += grados;
break;
case '+':
distancia--;
break;
case '-':
distancia++;
break;
case 'e':
if (ejes == 1) {
ejes = 0;
} else {
ejes = 1;
}
break;
case KEY_ESC:
finalizar();
exit(0); // Sale de la aplicacion si se presiona 'Esc'
}
glutPostRedisplay();
}
void GL_teclasEspeciales(int key, int x, int y) {
double denominador = 50.0;
double grados = PI / denominador;
switch (key) {
case GLUT_KEY_RIGHT:
if (camAngleX - grados < 0)
camAngleX = (camAngleX - grados) + 2 * PI;
else
camAngleX -= grados;
break;
case GLUT_KEY_LEFT:
if (camAngleX + grados >= 2 * PI)
camAngleX = (camAngleX + grados) - 2 * PI;
else
camAngleX += grados;
break;
case GLUT_KEY_UP:
if (camAngleY - grados <= -PI / 2)
camAngleY = -PI / 2 + 0.001;
else
camAngleY -= grados;
break;
case GLUT_KEY_DOWN:
if (camAngleY + grados >= PI / 2)
camAngleY = PI / 2 - 0.001;
else
camAngleY += grados;
break;
}
glutPostRedisplay();
}
void GL_OnMouseDown(int button, int state, int x, int y) {
rotate = false;
if (button == GLUT_LEFT_BUTTON) {
oldX = x;
oldY = y;
rotate = true;
}
}
void GL_OnMouseMove(int x, int y) {
if (rotate) {
camAngleX -= (x - oldX) * 0.01f;
camAngleY += (y - oldY) * 0.01f;
}
oldX = x;
oldY = y;
glutPostRedisplay();
}
void procesoOpenGL(int argc, char * argv[]) {
//Inicializa la libreria glut
glutInit( & argc, argv);
//Se va a usar doble buffer, paleta RGB
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
//Define la ventana de visualizacion
glutInitWindowSize(ANCHO, ALTO);
//Posicionar la ventana
glutInitWindowPosition(0, 0);
//Se crea la ventana cuyo nombre en la barra de titulo es lo que viene en argv[0]
glutCreateWindow(argv[0]);
//Funcion personalizada que inicializa parametros
GL_inicio();
//Define cual es la funcion de control de renderizado
// Se llama cada vez que se quiere dibujar nuevamente en la pantalla (cada vez que se produce el evento render)
//GL_dibujar CALLS OUR CODE//
glutDisplayFunc(GL_dibujar);
glutReshapeFunc(GL_cambioDeDimensionDeVentana);
glutIdleFunc(GL_dibujar);
//Define cuales son las funciones que atenderan los eventos del teclado
glutKeyboardFunc(GL_teclado);
glutSpecialFunc(GL_teclasEspeciales);
//Define cuales son las funciones que atenderan los eventos del mouse
glutMouseFunc(GL_OnMouseDown);
glutMotionFunc(GL_OnMouseMove);
//El programa espera aca
glutMainLoop();
}
int main(int argc, char * argv[]) {
N = atoi(argv[1]);
delta_tiempo = atof(argv[2]);
pasos = atoi(argv[3]);
//CPU VARIABLES
cPositionX = (float * ) malloc(N * sizeof(float));
cPositionY = (float * ) malloc(N * sizeof(float));
cPositionZ = (float * ) malloc(N * sizeof(float));
cVelocityX = (float * ) malloc(N * sizeof(float));
cVelocityY = (float * ) malloc(N * sizeof(float));
cVelocityZ = (float * ) malloc(N * sizeof(float));
masas = (float * ) malloc(N * sizeof(float));
fuerza_totalX = (float * ) malloc(sizeof(float) * N);
fuerza_totalY = (float * ) malloc(sizeof(float) * N);
fuerza_totalZ = (float * ) malloc(sizeof(float) * N);
//GPU VARIABLES (device buffers; allocated with hipMalloc below, so no host-side malloc is needed)
hipMalloc( & gpu_cPositionX, N * sizeof(float));
hipMalloc( & gpu_cPositionY, N * sizeof(float));
hipMalloc( & gpu_cPositionZ, N * sizeof(float));
hipMalloc( & gpu_cVelocityX, N * sizeof(float));
hipMalloc( & gpu_cVelocityY, N * sizeof(float));
hipMalloc( & gpu_cVelocityZ, N * sizeof(float));
hipMalloc( & gpu_masas, N * sizeof(float));
hipMalloc( & gpu_fuerza_totalX, N * sizeof(float));
hipMalloc( & gpu_fuerza_totalY, N * sizeof(float));
hipMalloc( & gpu_fuerza_totalZ, N * sizeof(float));
inicializarCuerpos(N);
//aca pasamos los datos a la GPU por primera vez//
hipMemcpy(gpu_cPositionX, cPositionX, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gpu_cPositionY, cPositionY, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gpu_cPositionZ, cPositionZ, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gpu_cVelocityX, cVelocityX, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gpu_cVelocityY, cVelocityY, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gpu_cVelocityZ, cVelocityZ, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gpu_masas, masas, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gpu_fuerza_totalX, fuerza_totalX, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gpu_fuerza_totalY, fuerza_totalY, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gpu_fuerza_totalZ, fuerza_totalZ, N * sizeof(float), hipMemcpyHostToDevice);
//ADENTRO DE ESTO SE VA A LLAMAR AL CALCULO//
procesoOpenGL(argc, argv);
return (0);
} | 313b461aee4568f34c75f9cbef149fd928a9f57c.cu | #include <GL/glut.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
//
// Constantes para OpenGL
//
#define KEY_ESC 27
#define ANCHO 1920
#define ALTO 1080
#define false 0
#define true 1
//
// Constantes para Algoritmo de gravitacion
//
#define PI (3.141592653589793)
#define G 6.673e-11
// ===============
// ===== CPU =====
// ===============
//
// Estructuras y variables para Algoritmo de gravitacion
//
const int CUDA_BLK = 64; // Tamaño predeterminado de bloque de hilos CUDA
float toroide_alfa;
float toroide_theta;
float toroide_incremento;
float toroide_lado;
float toroide_r;
float toroide_R;
int delta_tiempo = 1.0f; //Intervalo de tiempo, longitud de un paso
int pasos;
int N;
//variables nuestras CPU//
float * masas;
float * cPositionX;
float * cPositionY;
float * cPositionZ;
float * cVelocityX;
float * cVelocityY;
float * cVelocityZ;
float * fuerza_totalX;
float * fuerza_totalY;
float * fuerza_totalZ;
//variables nuestras GPU//
float * gpu_masas;
float * gpu_cPositionX;
float * gpu_cPositionY;
float * gpu_cPositionZ;
float * gpu_cVelocityX;
float * gpu_cVelocityY;
float * gpu_cVelocityZ;
float * gpu_fuerza_totalX;
float * gpu_fuerza_totalY;
float * gpu_fuerza_totalZ;
double cColorR = (double) rand() / (RAND_MAX + 1.0);
double cColorG = (double) rand() / (RAND_MAX + 1.0);
double cColorB = (double) rand() / (RAND_MAX + 1.0);
//terminan las variables nuestras.//
//
// Funciones para Algoritmo de gravitacion
//
void calcularFuerzas(int N, int dt) {
int cuerpo1, cuerpo2;
float dif_X, dif_Y, dif_Z;
float distancia;
float F;
for (cuerpo1 = 0; cuerpo1 < N - 1; cuerpo1++) {
for (cuerpo2 = cuerpo1 + 1; cuerpo2 < N; cuerpo2++) {
if ((cPositionX[cuerpo1] == cPositionX[cuerpo2]) && (cPositionY[cuerpo1] == cPositionY[cuerpo2]) && (cPositionZ[cuerpo1] == cPositionZ[cuerpo2]))
continue;
dif_X = cPositionX[cuerpo2] - cPositionX[cuerpo1];
dif_Y = cPositionY[cuerpo2] - cPositionY[cuerpo1];
dif_Z = cPositionZ[cuerpo2] - cPositionZ[cuerpo1];
distancia = sqrt(dif_X * dif_X + dif_Y * dif_Y + dif_Z * dif_Z);
F = (G * masas[cuerpo1] * masas[cuerpo2]) / (distancia * distancia);
dif_X *= F;
dif_Y *= F;
dif_Z *= F;
fuerza_totalX[cuerpo1] += dif_X;
fuerza_totalY[cuerpo1] += dif_Y;
fuerza_totalZ[cuerpo1] += dif_Z;
fuerza_totalX[cuerpo2] -= dif_X;
fuerza_totalY[cuerpo2] -= dif_Y;
fuerza_totalZ[cuerpo2] -= dif_Z;
}
}
}
void moverCuerpos(int N, int dt) {
int cuerpo;
for (cuerpo = 0; cuerpo < N; cuerpo++) {
fuerza_totalX[cuerpo] *= 1 / masas[cuerpo];
fuerza_totalY[cuerpo] *= 1 / masas[cuerpo];
cVelocityX[cuerpo] += fuerza_totalX[cuerpo] * dt;
cVelocityY[cuerpo] += fuerza_totalY[cuerpo] * dt;
cPositionX[cuerpo] += cVelocityX[cuerpo] * dt;
cPositionY[cuerpo] += cVelocityY[cuerpo] * dt;
fuerza_totalX[cuerpo] = 0.0;
fuerza_totalY[cuerpo] = 0.0;
fuerza_totalZ[cuerpo] = 0.0;
}
}
void gravitacionCPU(int N, int dt) {
calcularFuerzas(N, dt);
moverCuerpos(N, dt);
}
void inicializarEstrella(int i, double n) {
//todos van a tener la misma masa//
masas[i] = 0.001 * 8;
if ((toroide_alfa + toroide_incremento) >= 2 * M_PI) {
toroide_alfa = 0;
toroide_theta += toroide_incremento;
} else {
toroide_alfa += toroide_incremento;
}
cPositionX[i] = (toroide_R + toroide_r * cos(toroide_alfa)) * cos(toroide_theta);
cPositionY[i] = (toroide_R + toroide_r * cos(toroide_alfa)) * sin(toroide_theta);
cPositionZ[i] = toroide_r * sin(toroide_alfa);
cVelocityX[i] = 0.0;
cVelocityY[i] = 0.0;
cVelocityZ[i] = 0.0;
}
void inicializarCuerpos(int N) {
int cuerpo;
double n = N;
toroide_alfa = 0.0;
toroide_theta = 0.0;
toroide_lado = sqrt(N);
toroide_incremento = 2 * M_PI / toroide_lado;
toroide_r = 1.0;
toroide_R = 2 * toroide_r;
srand(time(NULL));
for (cuerpo = 0; cuerpo < N; cuerpo++) {
fuerza_totalX[cuerpo] = 0.0;
fuerza_totalY[cuerpo] = 0.0;
fuerza_totalZ[cuerpo] = 0.0;
inicializarEstrella(cuerpo, n);
}
masas[0] = 2.0e2;
cPositionX[0] = 0.0;
cPositionY[0] = 0.0;
cPositionZ[0] = 0.0;
cVelocityX[0] = -0.000001;
cVelocityY[0] = -0.000001;
cVelocityZ[0] = 0.0;
masas[1] = 1.0e1;
cPositionX[1] = -1.0;
cPositionY[1] = 0.0;
cPositionZ[1] = 0.0;
cVelocityX[1] = 0.0;
cVelocityY[1] = 0.0001;
cVelocityZ[1] = 0.0;
}
void finalizar(void) {
free(masas);
free(cPositionX);
free(cPositionY);
free(cPositionZ);
free(cVelocityX);
free(cVelocityY);
free(cVelocityZ);
free(fuerza_totalX);
free(fuerza_totalY);
free(fuerza_totalZ);
}
// ===============
// ===== GPU =====
// ===============
__global__ void kernelCalcularFuerzas(int N, int dt, float * gpu_masas, float * gpu_cPositionX, float * gpu_cPositionY, float * gpu_cPositionZ, float * gpu_cVelocityX, float * gpu_cVelocityY, float * gpu_cVelocityZ, float * gpu_fuerza_totalX, float * gpu_fuerza_totalY, float * gpu_fuerza_totalZ) {
extern __shared__ float shared_size[];
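// Layout note: shared_size is carved into 10 consecutive arrays of CUDA_BLK floats each
// (forces, positions, velocities, masses), so the launch must pass
// 10 * CUDA_BLK * sizeof(float) bytes of dynamic shared memory.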
//averiguar bien como saltear a la siguiente posición
float * sh_fuerza_totalX = & shared_size[0];
float * sh_fuerza_totalY = & shared_size[1 * CUDA_BLK];
float * sh_fuerza_totalZ = & shared_size[2 * CUDA_BLK];
float * sh_cPositionX = & shared_size[3 * CUDA_BLK];
float * sh_cPositionY = & shared_size[4 * CUDA_BLK];
float * sh_cPositionZ = & shared_size[5 * CUDA_BLK];
float * sh_cVelocityX = & shared_size[6 * CUDA_BLK];
float * sh_cVelocityY = & shared_size[7 * CUDA_BLK];
float * sh_cVelocityZ = & shared_size[8 * CUDA_BLK];
float * sh_masas = & shared_size[9 * CUDA_BLK];
//CON ESTA SOLUCIÓN, CONVIENE TENER MÁS HILOS QUE BLOQUES
//LA MASA SE PUEDE PONER EN MEMORIA CONSTANTE Para eso habría que tenerla en tiempo de ejecución
//DEFINO EL INDICE DE MI HILO
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i, j;
//Itero para todos los bloques
for (j = 0; j < N/CUDA_BLK; j++){
//Accedo coalescentemente a la memoria global para traer los datos a la memoria compartida
for (i = 0; i < CUDA_BLK; i++) {
//OBTENGO LOS DATOS DE MASA Y POSICIÓN DE MI INDICE, INICIALIZO VECTOR DE FUERZA EN CERO
sh_masas[threadIdx.x] = gpu_masas[idx];
sh_cPositionX[threadIdx.x] = gpu_cPositionX[idx];
sh_cPositionY[threadIdx.x] = gpu_cPositionY[idx];
sh_cPositionZ[threadIdx.x] = gpu_cPositionZ[idx];
sh_cVelocityX[threadIdx.x] = gpu_cVelocityX[idx];
sh_cVelocityY[threadIdx.x] = gpu_cVelocityY[idx];
sh_cVelocityZ[threadIdx.x] = gpu_cVelocityZ[idx];
}
__syncthreads();
//Se sincronizan para asegurar que todos los hilos trajeron los datos a procesar
for (i = 0; i < CUDA_BLK; i++) {
//Fuerzas inicializadas en cero
sh_fuerza_totalX[threadIdx.x] = 0.0f;
sh_fuerza_totalY[threadIdx.x] = 0.0f;
sh_fuerza_totalZ[threadIdx.x] = 0.0f;
float dif_X = sh_cPositionX[threadIdx.x] - sh_cPositionX[i];
float dif_Y = sh_cPositionY[threadIdx.x] - sh_cPositionY[i];
float dif_Z = sh_cPositionZ[threadIdx.x] - sh_cPositionZ[i];
float distancia = sqrt(dif_X * dif_X + dif_Y * dif_Y + dif_Z * dif_Z);
float F = (G * sh_masas[i] * sh_masas[threadIdx.x]) / (distancia * distancia);
dif_X *= F;
dif_Y *= F;
dif_Z *= F;
sh_fuerza_totalX[threadIdx.x] += dif_X;
sh_fuerza_totalY[threadIdx.x] += dif_Y;
sh_fuerza_totalZ[threadIdx.x] += dif_Z;
}
__syncthreads();
//Se sincronizan para asegurar que todos los hilos terminaron de procesar
//Acceden coalescentemente a la memoria global para escribir los resultados
for (i = 0; i < CUDA_BLK; i++) {
//GUARDO TODOS LOS DATOS EN LA MEMORIA COMPARTIDA, POSICIÓN, FUERZA, VELOCIDAD.
gpu_fuerza_totalX[idx] = sh_fuerza_totalX[threadIdx.x];
gpu_fuerza_totalY[idx] = sh_fuerza_totalY[threadIdx.x];
gpu_fuerza_totalZ[idx] = sh_fuerza_totalZ[threadIdx.x];
gpu_cPositionX[idx] = sh_cPositionX[threadIdx.x];
gpu_cPositionY[idx] = sh_cPositionY[threadIdx.x];
gpu_cPositionZ[idx] = sh_cPositionZ[threadIdx.x];
gpu_cVelocityX[idx] = sh_cVelocityX[threadIdx.x];
gpu_cVelocityY[idx] = sh_cVelocityY[threadIdx.x];
gpu_cVelocityZ[idx] = sh_cVelocityZ[threadIdx.x];
}
} //Fin de la iteración por bloques
}
__global__ void kernelMoverCuerpos(int N, int dt, float * gpu_masas, float * gpu_cPositionX, float * gpu_cPositionY, float * gpu_cPositionZ, float * gpu_cVelocityX, float * gpu_cVelocityY, float * gpu_cVelocityZ, float * gpu_fuerza_totalX, float * gpu_fuerza_totalY, float * gpu_fuerza_totalZ) {
//DEFINO EL INDICE DE MI HILO
/*
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int cuerpo;
for(cuerpo = 0; cuerpo < N; cuerpo++){
gpu_fuerza_totalX[cuerpo] *= 1/gpu_masas[cuerpo];
gpu_fuerza_totalY[cuerpo] *= 1/gpu_masas[cuerpo];
gpu_cVelocityX[i] += dt * gpu_fuerza_totalX[i];
gpu_cVelocityY[i] += dt * gpu_fuerza_totalY[i];
gpu_cVelocityZ[i] += dt * gpu_fuerza_totalZ[i];
gpu_cPositionX[cuerpo] += gpu_cVelocityX[cuerpo] *dt;
gpu_cPositionY[cuerpo] += gpu_cVelocityY[cuerpo] *dt;
gpu_fuerza_totalX[cuerpo] = 0.0;
gpu_fuerza_totalY[cuerpo] = 0.0;
gpu_fuerza_totalZ[cuerpo] = 0.0;
}
*/
}
void gravitacionGPU(int N, int dt) {
//TENEMOS QUE REESCRIBIR ESTAS DOS FUNCIONES PARA QUE ANDEN CON EL GPU
// Bloque unidimensional de hilos (*blk_size* hilos)
dim3 dimBlock(CUDA_BLK);
// Grid unidimensional (*ceil(n/blk_size)* bloques)
dim3 dimGrid((N + dimBlock.x - 1) / dimBlock.x);
kernelCalcularFuerzas<<<dimGrid, dimBlock, 10 * CUDA_BLK * sizeof(float)>>>(N, dt, gpu_masas, gpu_cPositionX, gpu_cPositionY, gpu_cPositionZ, gpu_cVelocityX, gpu_cVelocityY, gpu_cVelocityZ, gpu_fuerza_totalX, gpu_fuerza_totalY, gpu_fuerza_totalZ); // the kernel uses extern __shared__ for 10 arrays of CUDA_BLK floats, so the size must be supplied here
cudaDeviceSynchronize();
//Al kernel de mover cuerpos, le paso todos los parametros de arreglos de la GPU?
kernelMoverCuerpos<<<dimGrid, dimBlock>>>(N, dt, gpu_masas, gpu_cPositionX, gpu_cPositionY, gpu_cPositionZ, gpu_cVelocityX, gpu_cVelocityY, gpu_cVelocityZ, gpu_fuerza_totalX, gpu_fuerza_totalY, gpu_fuerza_totalZ);
cudaDeviceSynchronize();
}
// ==================
// ===== OpenGL =====
// ==================
//
// Variables OpenGL
//
double alfa = 0.0;
// Para angulo de rotacion y direccion de la camara
float angle = 0.0;
float camAngleX = 0;
float camAngleY = 0;
float distancia = 10;
int ejes = 1;
// Vector actual que representa la direccion de la camara
float lx = 0.0f, lz = -1.0f;
// posicion XZ de la camara
float x = 0.0f, z = 5.0f;
int oldX = 0, oldY = 0;
int rotate = false;
//
// Funciones OpenGL
//
//Funcion que se llama cada vez que se quiere dibujar nuevamente en la pantalla
//Se llama cada vez que se produce el evento render
void GL_camara() {
float camX, camY, camZ;
//Camara mirando al origen (pickObjX,pickObjY,pickObjZ) = (0,0,0)
float pickObjX = 0.0;
float pickObjY = 0.0;
float pickObjZ = 0.0;
camX = distancia * sin(camAngleX);
camY = distancia * sin(camAngleY);
camZ = distancia * cos(camAngleY) * cos(camAngleX);
//Ubicar la camara
gluLookAt(camX, camY, camZ, // Posicion de la camara
pickObjX, pickObjY, pickObjZ, // Mirando al punto
0.0, 1.0, 0.0); // Up vector
}
void GL_dibujarCuerpos(void) {
int i;
for (i = 0; i < N; i++) {
glPushMatrix();
glTranslatef(cPositionX[i], cPositionY[i], cPositionZ[i]);
//reemplazar por los valores random de los colores//
glColor3f((double) rand() / (RAND_MAX + 1.0), (double) rand() / (RAND_MAX + 1.0), (double) rand() / (RAND_MAX + 1.0));
glutSolidSphere(0.02, 20, 20);
glPopMatrix();
}
//ACA!!! se Llama a la funcion que calcula las fuerzas nuevamente
//gravitacion GPU//
gravitacionGPU(N, delta_tiempo);
//TRAERME LOS DATOS DE LA GPU//
cudaMemcpy(cPositionX, gpu_cPositionX, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(cPositionY, gpu_cPositionY, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(cPositionZ, gpu_cPositionZ, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(cVelocityX, gpu_cVelocityX, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(cVelocityY, gpu_cVelocityY, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(cVelocityZ, gpu_cVelocityZ, N * sizeof(float), cudaMemcpyDeviceToHost);
//gravitacionCPU(N,delta_tiempo);
}
void GL_dibujar(void) {
// Borra el color y los buffers de profundidad
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Reiniciar la matriz de transformaciones
glLoadIdentity();
//ubica la camara
GL_camara();
//Dibuja los ejes de coordenadas (si estan habilitados)
if (ejes) {
glBegin(GL_LINES);
glColor3f(1.0, 0.0, 0.0);
glVertex3d(0, 0, 0);
glVertex3d(5.0, 0.0, 0.0);
glColor3f(0.0, 1.0, 0.0);
glVertex3d(0, 0, 0);
glVertex3d(0.0, 5.0, 0.0);
glColor3f(0.0, 0.0, 1.0);
glVertex3d(0, 0, 0);
glVertex3d(0, 0, 5.0);
glEnd();
}
// Dibuja
glPushMatrix();
GL_dibujarCuerpos();
glPopMatrix();
glutSwapBuffers();
}
void GL_cambioDeDimensionDeVentana(int w, int h) {
// Evita que se divida por cero cuando la ventana es muy chica
if (h == 0) h = 1;
float ratio = w * 1.0 / h;
// Usa la matriz de proyecion
glMatrixMode(GL_PROJECTION);
// Reset matriz
glLoadIdentity();
// Configura el viewport para la ventana completa
glViewport(0, 0, w, h);
// Configura la perspectiva correcta
gluPerspective(45.0f, ratio, 0.1f, 100.0f);
// Modelview
glMatrixMode(GL_MODELVIEW);
}
//Funcion de inicializacion
void GL_inicio(void) {
glClearColor(0.0, 0.0, 0.0, 0.0);
glOrtho(-10, 10, -10, 10, -10, 10);
}
void GL_teclado(unsigned char key, int x, int y) {
double denominador = 50.0;
double grados = PI / denominador;
switch (key) {
case 'a':
if (alfa + grados >= 2 * PI)
alfa = (alfa + grados) - 2 * PI;
else
alfa += grados;
break;
case '+':
distancia--;
break;
case '-':
distancia++;
break;
case 'e':
if (ejes == 1) {
ejes = 0;
} else {
ejes = 1;
}
break;
case KEY_ESC:
finalizar();
exit(0); // Sale de la aplicacion si se presiona 'Esc'
}
glutPostRedisplay();
}
void GL_teclasEspeciales(int key, int x, int y) {
double denominador = 50.0;
double grados = PI / denominador;
switch (key) {
case GLUT_KEY_RIGHT:
if (camAngleX - grados < 0)
camAngleX = (camAngleX - grados) + 2 * PI;
else
camAngleX -= grados;
break;
case GLUT_KEY_LEFT:
if (camAngleX + grados >= 2 * PI)
camAngleX = (camAngleX + grados) - 2 * PI;
else
camAngleX += grados;
break;
case GLUT_KEY_UP:
if (camAngleY - grados <= -PI / 2)
camAngleY = -PI / 2 + 0.001;
else
camAngleY -= grados;
break;
case GLUT_KEY_DOWN:
if (camAngleY + grados >= PI / 2)
camAngleY = PI / 2 - 0.001;
else
camAngleY += grados;
break;
}
glutPostRedisplay();
}
void GL_OnMouseDown(int button, int state, int x, int y) {
rotate = false;
if (button == GLUT_LEFT_BUTTON) {
oldX = x;
oldY = y;
rotate = true;
}
}
void GL_OnMouseMove(int x, int y) {
if (rotate) {
camAngleX -= (x - oldX) * 0.01f;
camAngleY += (y - oldY) * 0.01f;
}
oldX = x;
oldY = y;
glutPostRedisplay();
}
void procesoOpenGL(int argc, char * argv[]) {
//Inicializa la libreria glut
glutInit( & argc, argv);
//Se va a usar doble buffer, paleta RGB
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
//Define la ventana de visualizacion
glutInitWindowSize(ANCHO, ALTO);
//Posicionar la ventana
glutInitWindowPosition(0, 0);
//Se crea la ventana cuyo nombre en la barra de titulo es lo que viene en argv[0]
glutCreateWindow(argv[0]);
//Funcion personalizada que inicializa parametros
GL_inicio();
//Define cual es la funcion de control de renderizado
// Se llama cada vez que se quiere dibujar nuevamente en la pantalla (cada vez que se produce el evento render)
//GL DIBUJAR LLAMA A NUESTRO CÓDIGO//
glutDisplayFunc(GL_dibujar);
glutReshapeFunc(GL_cambioDeDimensionDeVentana);
glutIdleFunc(GL_dibujar);
//Define cuales son las funciones que atenderan los eventos del teclado
glutKeyboardFunc(GL_teclado);
glutSpecialFunc(GL_teclasEspeciales);
//Define cuales son las funciones que atenderan los eventos del mouse
glutMouseFunc(GL_OnMouseDown);
glutMotionFunc(GL_OnMouseMove);
//El programa espera aca
glutMainLoop();
}
int main(int argc, char * argv[]) {
N = atoi(argv[1]);
delta_tiempo = atof(argv[2]);
pasos = atoi(argv[3]);
//CPU VARIABLES
cPositionX = (float * ) malloc(N * sizeof(float));
cPositionY = (float * ) malloc(N * sizeof(float));
cPositionZ = (float * ) malloc(N * sizeof(float));
cVelocityX = (float * ) malloc(N * sizeof(float));
cVelocityY = (float * ) malloc(N * sizeof(float));
cVelocityZ = (float * ) malloc(N * sizeof(float));
masas = (float * ) malloc(N * sizeof(float));
fuerza_totalX = (float * ) malloc(sizeof(float) * N);
fuerza_totalY = (float * ) malloc(sizeof(float) * N);
fuerza_totalZ = (float * ) malloc(sizeof(float) * N);
//GPU VARIABLES (device buffers; allocated with cudaMalloc below, so no host-side malloc is needed)
cudaMalloc( & gpu_cPositionX, N * sizeof(float));
cudaMalloc( & gpu_cPositionY, N * sizeof(float));
cudaMalloc( & gpu_cPositionZ, N * sizeof(float));
cudaMalloc( & gpu_cVelocityX, N * sizeof(float));
cudaMalloc( & gpu_cVelocityY, N * sizeof(float));
cudaMalloc( & gpu_cVelocityZ, N * sizeof(float));
cudaMalloc( & gpu_masas, N * sizeof(float));
cudaMalloc( & gpu_fuerza_totalX, N * sizeof(float));
cudaMalloc( & gpu_fuerza_totalY, N * sizeof(float));
cudaMalloc( & gpu_fuerza_totalZ, N * sizeof(float));
inicializarCuerpos(N);
//aca pasamos los datos a la GPU por primera vez//
cudaMemcpy(gpu_cPositionX, cPositionX, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_cPositionY, cPositionY, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_cPositionZ, cPositionZ, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_cVelocityX, cVelocityX, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_cVelocityY, cVelocityY, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_cVelocityZ, cVelocityZ, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_masas, masas, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_fuerza_totalX, fuerza_totalX, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_fuerza_totalY, fuerza_totalY, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_fuerza_totalZ, fuerza_totalZ, N * sizeof(float), cudaMemcpyHostToDevice);
//ADENTRO DE ESTO SE VA A LLAMAR AL CALCULO//
procesoOpenGL(argc, argv);
return (0);
} |
2ef2fb781a7b33870d4c8e6961265cafa5e3785f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernSmooth.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
int width = XSIZE;
int height = YSIZE;
unsigned char *in = NULL;
hipMalloc(&in, XSIZE*YSIZE);
unsigned char *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
const float *kernel = NULL;
hipMalloc(&kernel, XSIZE*YSIZE);
int kernSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kernSmooth), dim3(gridBlock),dim3(threadBlock), 0, 0, N,width,height,in,out,kernel,kernSize);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kernSmooth), dim3(gridBlock),dim3(threadBlock), 0, 0, N,width,height,in,out,kernel,kernSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kernSmooth), dim3(gridBlock),dim3(threadBlock), 0, 0, N,width,height,in,out,kernel,kernSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 2ef2fb781a7b33870d4c8e6961265cafa5e3785f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernSmooth.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
int width = XSIZE;
int height = YSIZE;
unsigned char *in = NULL;
cudaMalloc(&in, XSIZE*YSIZE);
unsigned char *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
const float *kernel = NULL;
cudaMalloc(&kernel, XSIZE*YSIZE);
int kernSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernSmooth<<<gridBlock,threadBlock>>>(N,width,height,in,out,kernel,kernSize);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernSmooth<<<gridBlock,threadBlock>>>(N,width,height,in,out,kernel,kernSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernSmooth<<<gridBlock,threadBlock>>>(N,width,height,in,out,kernel,kernSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b8a87af76b77993f158898cc1142b78a7a26a764.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/hip/cub.h>
#include <ATen/native/hip/Randperm.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/arange.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/randperm_native.h>
#endif
#include <limits>
namespace at::native {
// [Algorithm of randperm]
//
// randperm is implemented by sorting an arange tensor of size n with randomly
// generated keys. When random keys are different from each other, all different
// permutations have the same probability.
//
// However, there is a pitfall here:
// For better performance, these N random keys are generated independently,
// and there is no effort to make sure they are different at the time of generation.
// When two keys are identical, stable sorting algorithms will not permute these two keys.
// As a result, (0, 1) will appear more often than (1, 0).
//
// To overcome this pitfall we first carefully choose the number of bits in these keys,
// so that the probability of having duplicate keys is under a threshold. Let q be the
// threshold probability for having non-duplicate keys, then it can be proved that[1]
// the number of bits required is: ceil(log2(n - (6 n^2 + 1) / (12 log(q))))
//
// Then after sort, we launch a separate kernel that additionally shuffles any islands
// of values whose keys matched. The algorithm of this kernel is as follows:
// Each thread reads its key and the keys of its neighbors to tell if it's part of an island.
// For each island, the first thread in the island sees a key match at index i+1 but not index i-1.
// This thread considers itself the "island leader". The island leader then reads more indices to
// the right to figure out how big the island is. Most likely, the island will be very small,
// just a few values. The island leader then rolls that many RNG, uses them to additionally
// shuffle values within the island using serial Fisher-Yates, and writes them out.
//
// Reference
// [1] https://osf.io/af2hy/
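// Illustrative sketch (added for exposition, not part of the upstream file):
// evaluating the bit-width formula above on the host shows which sorting path
// is taken. With the fixed threshold q = 0.9 used below, n = 10000 gives
// ceil(log2(1e4 + (6e8 + 1) / 1.2643)) = 29 bits, so 32-bit keys suffice,
// while n = 100000 needs 36 bits and falls through to the 64-bit path.
inline int randperm_required_bits_example(double n) {
  const double log_threshold_12 = std::log(0.9) * 12; // negative, so the subtraction grows the log2 argument
  const double raw = std::log2(n - (6 * n * n + 1) / log_threshold_12);
  const int bits = static_cast<int>(std::ceil(raw));
  return bits < 64 ? bits : 64; // clamped to 64, exactly as randperm_out_cuda does below
}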
// The kernels are templated on an opaque, self-aligned type of the correct
// size to avoid redundant kernels for different types of the same size.
namespace {
template <int N> struct alignas(N) OpaqueType { char data[N]; };
}
Tensor& randperm_out_cuda(int64_t n, c10::optional<Generator> generator, Tensor& result) {
TORCH_CHECK(n >= 0, "n must be non-negative, got", n);
check_supported_max_int_with_precision(n, result);
result.resize_({n});
auto range = at::arange(n, result.options());
// shuffled_data points to the underlying data of the output tensor if the tensor is contiguous; otherwise it
// points to a new tensor.
Tensor shuffled;
void *shuffled_data;
if (result.is_contiguous()) {
shuffled_data = result.data_ptr();
} else {
shuffled = at::empty(n, result.options());
shuffled_data = shuffled.data_ptr();
}
auto opt = TensorOptions().device(result.device());
// See note [Algorithm of randperm]
const double log_threshold_12 = ::log(0.9) * 12;
double nd = static_cast<double>(n);
int bits = ::min(64,
static_cast<int>(::ceil(std::log2(nd - (6 * nd * nd + 1) / log_threshold_12))));
if (n == 0) {
return result;
} else if (bits <= 32) {
// For asserting device type match of the generator and result,
// we delegate that to the 'random_' function below.
auto keys = at::empty(result.sizes(), opt.dtype(kInt)).random_(
std::numeric_limits<int>::min(), std::numeric_limits<int>::max(), generator);
auto keys_tmp = at::empty_like(keys);
auto keys_out = keys_tmp.mutable_data_ptr<int>();
AT_DISPATCH_ALL_TYPES_AND(kHalf, result.scalar_type(), "randperm_out_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
auto shuffled_data_ = reinterpret_cast<dtype*>(shuffled_data);
auto* range_data = reinterpret_cast<const dtype*>(range.const_data_ptr());
at::cuda::cub::radix_sort_pairs<int, dtype>(
keys.const_data_ptr<int>(), keys_out,
range_data, shuffled_data_,
n, false, 0, bits);
randperm_handle_duplicate_keys(keys_out, shuffled_data_, bits, n, generator);
});
} else {
auto keys = at::empty(result.sizes(), opt.dtype(kLong)).random_(
std::numeric_limits<int64_t>::min(), std::numeric_limits<int64_t>::max(), generator);
auto keys_tmp = at::empty_like(keys);
auto keys_out = keys_tmp.mutable_data_ptr<int64_t>();
AT_DISPATCH_ALL_TYPES_AND(kHalf, result.scalar_type(), "randperm_out_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
auto shuffled_data_ = reinterpret_cast<dtype*>(shuffled_data);
auto* range_data = reinterpret_cast<const dtype*>(range.data_ptr());
at::cuda::cub::radix_sort_pairs<int64_t, dtype>(
keys.const_data_ptr<int64_t>(), keys_out,
range_data, shuffled_data_,
n, false, 0, bits);
randperm_handle_duplicate_keys(keys_out, shuffled_data_, bits, n, generator);
});
}
if (!result.is_contiguous()) {
result.copy_(shuffled);
}
return result;
}
} // namespace at::native
| b8a87af76b77993f158898cc1142b78a7a26a764.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/cuda/cub.h>
#include <ATen/native/cuda/Randperm.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/arange.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/randperm_native.h>
#endif
#include <limits>
namespace at::native {
// [Algorithm of randperm]
//
// randperm is implemented by sorting an arange tensor of size n with randomly
// generated keys. When random keys are different from each other, all different
// permutations have the same probability.
//
// However, there is a pitfall here:
// For better performance, these N random keys are generated independently,
// and there is no effort to make sure they are different at the time of generation.
// When two keys are identical, stable sorting algorithms will not permute these two keys.
// As a result, (0, 1) will appear more often than (1, 0).
//
// To overcome this pitfall we first carefully choose the number of bits in these keys,
// so that the probability of having duplicate keys is under a threshold. Let q be the
// threshold probability for having non-duplicate keys, then it can be proved that[1]
// the number of bits required is: ceil(log2(n - (6 n^2 + 1) / (12 log(q))))
//
// Then after sort, we launch a separate kernel that additionally shuffles any islands
// of values whose keys matched. The algorithm of this kernel is as follows:
// Each thread reads its key and the keys of its neighbors to tell if it's part of an island.
// For each island, the first thread in the island sees a key match at index i+1 but not index i-1.
// This thread considers itself the "island leader". The island leader then reads more indices to
// the right to figure out how big the island is. Most likely, the island will be very small,
// just a few values. The island leader then rolls that many RNG, uses them to additionally
// shuffle values within the island using serial Fisher-Yates, and writes them out.
//
// Reference
// [1] https://osf.io/af2hy/
// The kernels are templated on an opaque, self-aligned type of the correct
// size to avoid redundant kernels for different types of the same size.
namespace {
template <int N> struct alignas(N) OpaqueType { char data[N]; };
}
Tensor& randperm_out_cuda(int64_t n, c10::optional<Generator> generator, Tensor& result) {
TORCH_CHECK(n >= 0, "n must be non-negative, got", n);
check_supported_max_int_with_precision(n, result);
result.resize_({n});
auto range = at::arange(n, result.options());
// shuffled_data points to the underlying data of the output tensor if the tensor is contiguous; otherwise it
// points to a new tensor.
Tensor shuffled;
void *shuffled_data;
if (result.is_contiguous()) {
shuffled_data = result.data_ptr();
} else {
shuffled = at::empty(n, result.options());
shuffled_data = shuffled.data_ptr();
}
auto opt = TensorOptions().device(result.device());
// See note [Algorithm of randperm]
const double log_threshold_12 = std::log(0.9) * 12;
double nd = static_cast<double>(n);
int bits = std::min(64,
static_cast<int>(std::ceil(std::log2(nd - (6 * nd * nd + 1) / log_threshold_12))));
if (n == 0) {
return result;
} else if (bits <= 32) {
// For asserting device type match of the generator and result,
// we delegate that to the 'random_' function below.
auto keys = at::empty(result.sizes(), opt.dtype(kInt)).random_(
std::numeric_limits<int>::min(), std::numeric_limits<int>::max(), generator);
auto keys_tmp = at::empty_like(keys);
auto keys_out = keys_tmp.mutable_data_ptr<int>();
AT_DISPATCH_ALL_TYPES_AND(kHalf, result.scalar_type(), "randperm_out_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
auto shuffled_data_ = reinterpret_cast<dtype*>(shuffled_data);
auto* range_data = reinterpret_cast<const dtype*>(range.const_data_ptr());
at::cuda::cub::radix_sort_pairs<int, dtype>(
keys.const_data_ptr<int>(), keys_out,
range_data, shuffled_data_,
n, false, 0, bits);
randperm_handle_duplicate_keys(keys_out, shuffled_data_, bits, n, generator);
});
} else {
auto keys = at::empty(result.sizes(), opt.dtype(kLong)).random_(
std::numeric_limits<int64_t>::min(), std::numeric_limits<int64_t>::max(), generator);
auto keys_tmp = at::empty_like(keys);
auto keys_out = keys_tmp.mutable_data_ptr<int64_t>();
AT_DISPATCH_ALL_TYPES_AND(kHalf, result.scalar_type(), "randperm_out_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
auto shuffled_data_ = reinterpret_cast<dtype*>(shuffled_data);
auto* range_data = reinterpret_cast<const dtype*>(range.data_ptr());
at::cuda::cub::radix_sort_pairs<int64_t, dtype>(
keys.const_data_ptr<int64_t>(), keys_out,
range_data, shuffled_data_,
n, false, 0, bits);
randperm_handle_duplicate_keys(keys_out, shuffled_data_, bits, n, generator);
});
}
if (!result.is_contiguous()) {
result.copy_(shuffled);
}
return result;
}
} // namespace at::native
|
02ec52b4cb20068918791a8a1ad6fe54c6ea2875.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by jiashuai on 17-9-21.
//
#include "thundersvm/kernel/smo_kernel.h"
#include <thrust/sort.h>
#include <thrust/system/hip/detail/par.h>
namespace svm_kernel {
template<typename T>
__device__ int get_block_min(const T *values, int *index) {
int tid = threadIdx.x;
index[tid] = tid;
__syncthreads();
//block size is always the power of 2
for (int offset = blockDim.x / 2; offset > 0; offset >>= 1) {
if (tid < offset) {
if (values[index[tid + offset]] < values[index[tid]]) {
index[tid] = index[tid + offset];
}
}
__syncthreads();
}
return index[0];
}
__global__ void
c_smo_solve_kernel(const int *label, float_type *f_val, float_type *alpha, float_type *alpha_diff,
const int *working_set, int ws_size,
float_type Cp, float_type Cn, const kernel_type *k_mat_rows, const kernel_type *k_mat_diag, int row_len,
float_type eps,
float_type *diff, int max_iter) {
//"row_len" equals to the number of instances in the original training dataset.
//allocate shared memory
extern __shared__ int shared_mem[];
int *f_idx2reduce = shared_mem; //temporary memory for reduction
float_type *f_val2reduce = (float_type *) &shared_mem[ws_size]; //f values used for reduction.
float_type *alpha_i_diff = (float_type *) &shared_mem[ws_size + ws_size * sizeof(float_type) / sizeof(int)]; //delta alpha_i
float_type *alpha_j_diff = &alpha_i_diff[1];
kernel_type *kd = (kernel_type *) &alpha_j_diff[1]; // diagonal elements for kernel matrix
//index, f value and alpha for each instance
int tid = threadIdx.x;
int wsi = working_set[tid];
kd[tid] = k_mat_diag[wsi];
float_type y = label[wsi];
float_type f = f_val[wsi];
float_type a = alpha[wsi];
float_type aold = a;
__syncthreads();
float_type local_eps;
int numOfIter = 0;
while (1) {
//select fUp and fLow
if (is_I_up(a, y, Cp, Cn))
f_val2reduce[tid] = f;
else
f_val2reduce[tid] = INFINITY;
int i = get_block_min(f_val2reduce, f_idx2reduce);
float_type up_value = f_val2reduce[i];
kernel_type kIwsI = k_mat_rows[row_len * i + wsi];//K[i, wsi]
__syncthreads();
if (is_I_low(a, y, Cp, Cn))
f_val2reduce[tid] = -f;
else
f_val2reduce[tid] = INFINITY;
int j1 = get_block_min(f_val2reduce, f_idx2reduce);
float_type low_value = -f_val2reduce[j1];
float_type local_diff = low_value - up_value;
if (numOfIter == 0) {
local_eps = max(eps, 0.1f * local_diff);
if (tid == 0) {
diff[0] = local_diff;
}
}
if (numOfIter > max_iter || local_diff < local_eps) {
alpha[wsi] = a;
alpha_diff[tid] = -(a - aold) * y;
diff[1] = numOfIter;
break;
}
__syncthreads();
//select j2 using second order heuristic
if (-up_value > -f && (is_I_low(a, y, Cp, Cn))) {
float_type aIJ = kd[i] + kd[tid] - 2 * kIwsI;
float_type bIJ = -up_value + f;
f_val2reduce[tid] = (-bIJ * bIJ / aIJ);
} else
f_val2reduce[tid] = INFINITY;
int j2 = get_block_min(f_val2reduce, f_idx2reduce);
//update alpha
if (tid == i)
*alpha_i_diff = y > 0 ? Cp - a : a;
if (tid == j2)
*alpha_j_diff = min(y > 0 ? a : Cn - a, (-up_value + f) / (kd[i] + kd[j2] - 2 * kIwsI));
__syncthreads();
float_type l = min(*alpha_i_diff, *alpha_j_diff);
if (tid == i)
a += l * y;
if (tid == j2)
a -= l * y;
//update f
kernel_type kJ2wsI = k_mat_rows[row_len * j2 + wsi];//K[J2, wsi]
f -= l * (kJ2wsI - kIwsI);
numOfIter++;
}
}
__global__ void
nu_smo_solve_kernel(const int *label, float_type *f_values, float_type *alpha, float_type *alpha_diff,
const int *working_set,
int ws_size, float C, const kernel_type *k_mat_rows, const kernel_type *k_mat_diag, int row_len,
float_type eps,
float_type *diff, int max_iter) {
//"row_len" equals to the number of instances in the original training dataset.
//allocate shared memory
extern __shared__ int shared_mem[];
int *f_idx2reduce = shared_mem; //temporary memory for reduction
float_type *f_val2reduce = (float_type *) &shared_mem[ws_size]; //f values used for reduction.
float_type *alpha_i_diff = (float_type *) &shared_mem[ws_size + ws_size * sizeof(float_type) / sizeof(int)]; //delta alpha_i
float_type *alpha_j_diff = &alpha_i_diff[1];
kernel_type *kd = (kernel_type *) &alpha_j_diff[1]; // diagonal elements for kernel matrix
//index, f value and alpha for each instance
int tid = threadIdx.x;
int wsi = working_set[tid];
kd[tid] = k_mat_diag[wsi];
float_type y = label[wsi];
float_type f = f_values[wsi];
float_type a = alpha[wsi];
float_type aold = a;
__syncthreads();
float_type local_eps;
int numOfIter = 0;
while (1) {
//select I_up (y=+1)
if (y > 0 && a < C)
f_val2reduce[tid] = f;
else
f_val2reduce[tid] = INFINITY;
__syncthreads();
int ip = get_block_min(f_val2reduce, f_idx2reduce);
float_type up_value_p = f_val2reduce[ip];
kernel_type kIpwsI = k_mat_rows[row_len * ip + wsi];//K[i, wsi]
__syncthreads();
//select I_up (y=-1)
if (y < 0 && a > 0)
f_val2reduce[tid] = f;
else
f_val2reduce[tid] = INFINITY;
int in = get_block_min(f_val2reduce, f_idx2reduce);
float_type up_value_n = f_val2reduce[in];
kernel_type kInwsI = k_mat_rows[row_len * in + wsi];//K[i, wsi]
__syncthreads();
//select I_low (y=+1)
if (y > 0 && a > 0)
f_val2reduce[tid] = -f;
else
f_val2reduce[tid] = INFINITY;
int j1p = get_block_min(f_val2reduce, f_idx2reduce);
float_type low_value_p = -f_val2reduce[j1p];
__syncthreads();
//select I_low (y=-1)
if (y < 0 && a < C)
f_val2reduce[tid] = -f;
else
f_val2reduce[tid] = INFINITY;
int j1n = get_block_min(f_val2reduce, f_idx2reduce);
float_type low_value_n = -f_val2reduce[j1n];
float_type local_diff = max(low_value_p - up_value_p, low_value_n - up_value_n);
if (numOfIter == 0) {
local_eps = max(eps, 0.1 * local_diff);
if (tid == 0) {
diff[0] = local_diff;
}
}
if (numOfIter > max_iter || local_diff < local_eps) {
alpha[wsi] = a;
alpha_diff[tid] = -(a - aold) * y;
diff[1] = numOfIter;
break;
}
__syncthreads();
//select j2p using second order heuristic
if (-up_value_p > -f && y > 0 && a > 0) {
float_type aIJ = kd[ip] + kd[tid] - 2 * kIpwsI;
float_type bIJ = -up_value_p + f;
f_val2reduce[tid] = -bIJ * bIJ / aIJ;
} else
f_val2reduce[tid] = INFINITY;
int j2p = get_block_min(f_val2reduce, f_idx2reduce);
float_type f_val_j2p = f_val2reduce[j2p];
__syncthreads();
//select j2n using second order heuristic
if (-up_value_n > -f && y < 0 && a < C) {
float_type aIJ = kd[in] + kd[tid] - 2 * kInwsI;
float_type bIJ = -up_value_n + f;
f_val2reduce[tid] = -bIJ * bIJ / aIJ;
} else
f_val2reduce[tid] = INFINITY;
int j2n = get_block_min(f_val2reduce, f_idx2reduce);
int i, j2;
float_type up_value;
kernel_type kIwsI;
if (f_val_j2p < f_val2reduce[j2n]) {
i = ip;
j2 = j2p;
up_value = up_value_p;
kIwsI = kIpwsI;
} else {
i = in;
j2 = j2n;
kIwsI = kInwsI;
up_value = up_value_n;
}
//update alpha
if (tid == i)
*alpha_i_diff = y > 0 ? C - a : a;
if (tid == j2)
*alpha_j_diff = min(y > 0 ? a : C - a, (-up_value + f) / (kd[i] + kd[j2] - 2 * kIwsI));
__syncthreads();
float_type l = min(*alpha_i_diff, *alpha_j_diff);
if (tid == i)
a += l * y;
if (tid == j2)
a -= l * y;
//update f
kernel_type kJ2wsI = k_mat_rows[row_len * j2 + wsi];//K[J2, wsi]
f -= l * (kJ2wsI - kIwsI);
numOfIter++;
}
}
void
c_smo_solve(const SyncArray<int> &y, SyncArray<float_type> &f_val, SyncArray<float_type> &alpha,
SyncArray<float_type> &alpha_diff,
const SyncArray<int> &working_set, float_type Cp, float_type Cn, const SyncArray<kernel_type> &k_mat_rows,
const SyncArray<kernel_type> &k_mat_diag, int row_len, float_type eps, SyncArray<float_type> &diff,
int max_iter) {
size_t ws_size = working_set.size();
size_t smem_size = 0;
smem_size += ws_size * sizeof(int); //f_idx2reduce
smem_size += ws_size * sizeof(float_type); //f_val2reduce
smem_size += ws_size * sizeof(kernel_type); //kd
smem_size += 2 * sizeof(float_type); //alpha diff
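        // Worked example (illustrative, not in the original source): with ws_size = 1024 and
        // float_type == kernel_type == float this is 1024*4 + 1024*4 + 1024*4 + 2*4 = 12296
        // bytes of dynamic shared memory; with double it becomes 4096 + 8192 + 8192 + 16 =
        // 20496 bytes, still well under the default 48 KB per-block limit.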
c_smo_solve_kernel << < 1, ws_size, smem_size >> >
(y.device_data(), f_val.device_data(), alpha.device_data(), alpha_diff.device_data(),
working_set.device_data(), ws_size, Cp, Cn, k_mat_rows.device_data(), k_mat_diag.device_data(),
row_len, eps, diff.device_data(), max_iter);
}
void nu_smo_solve(const SyncArray<int> &y, SyncArray<float_type> &f_val, SyncArray<float_type> &alpha,
SyncArray<float_type> &alpha_diff,
const SyncArray<int> &working_set, float_type C, const SyncArray<kernel_type> &k_mat_rows,
const SyncArray<kernel_type> &k_mat_diag, int row_len, float_type eps, SyncArray<float_type> &diff,
int max_iter) {
size_t ws_size = working_set.size();
size_t smem_size = 0;
smem_size += ws_size * sizeof(int); //f_idx2reduce
smem_size += ws_size * sizeof(float_type); //f_val2reduce
smem_size += ws_size * sizeof(kernel_type); //kd
smem_size += 2 * sizeof(float_type); //alpha diff
nu_smo_solve_kernel << < 1, ws_size, smem_size >> >
(y.device_data(), f_val.device_data(), alpha.device_data(), alpha_diff.device_data(),
working_set.device_data(), ws_size, C, k_mat_rows.device_data(), k_mat_diag.device_data(),
row_len, eps, diff.device_data(), max_iter);
}
__global__ void
update_f_kernel(float_type *f, int ws_size, const float_type *alpha_diff, const kernel_type *k_mat_rows,
int n_instances) {
//"n_instances" equals to the number of rows of the whole kernel matrix for both SVC and SVR.
KERNEL_LOOP(idx, n_instances) {//one thread to update multiple fvalues.
double sum_diff = 0;
for (int i = 0; i < ws_size; ++i) {
double d = alpha_diff[i];
if (d != 0) {
sum_diff += d * k_mat_rows[i * n_instances + idx];
}
}
f[idx] -= sum_diff;
}
}
void
update_f(SyncArray<float_type> &f, const SyncArray<float_type> &alpha_diff, const SyncArray<kernel_type> &k_mat_rows,
int n_instances) {
SAFE_KERNEL_LAUNCH(update_f_kernel, f.device_data(), alpha_diff.size(), alpha_diff.device_data(),
k_mat_rows.device_data(), n_instances);
}
void sort_f(SyncArray<float_type> &f_val2sort, SyncArray<int> &f_idx2sort) {
thrust::sort_by_key(thrust::hip::par, f_val2sort.device_data(), f_val2sort.device_data() + f_val2sort.size(),
f_idx2sort.device_data(), thrust::less<float_type>());
}
}
| 02ec52b4cb20068918791a8a1ad6fe54c6ea2875.cu | //
// Created by jiashuai on 17-9-21.
//
#include "thundersvm/kernel/smo_kernel.h"
#include <thrust/sort.h>
#include <thrust/system/cuda/detail/par.h>
namespace svm_kernel {
template<typename T>
__device__ int get_block_min(const T *values, int *index) {
int tid = threadIdx.x;
index[tid] = tid;
__syncthreads();
//block size is always the power of 2
for (int offset = blockDim.x / 2; offset > 0; offset >>= 1) {
if (tid < offset) {
if (values[index[tid + offset]] < values[index[tid]]) {
index[tid] = index[tid + offset];
}
}
__syncthreads();
}
return index[0];
}
__global__ void
c_smo_solve_kernel(const int *label, float_type *f_val, float_type *alpha, float_type *alpha_diff,
const int *working_set, int ws_size,
float_type Cp, float_type Cn, const kernel_type *k_mat_rows, const kernel_type *k_mat_diag, int row_len,
float_type eps,
float_type *diff, int max_iter) {
//"row_len" equals to the number of instances in the original training dataset.
//allocate shared memory
extern __shared__ int shared_mem[];
int *f_idx2reduce = shared_mem; //temporary memory for reduction
float_type *f_val2reduce = (float_type *) &shared_mem[ws_size]; //f values used for reduction.
float_type *alpha_i_diff = (float_type *) &shared_mem[ws_size + ws_size * sizeof(float_type) / sizeof(int)]; //delta alpha_i
float_type *alpha_j_diff = &alpha_i_diff[1];
kernel_type *kd = (kernel_type *) &alpha_j_diff[1]; // diagonal elements for kernel matrix
//index, f value and alpha for each instance
int tid = threadIdx.x;
int wsi = working_set[tid];
kd[tid] = k_mat_diag[wsi];
float_type y = label[wsi];
float_type f = f_val[wsi];
float_type a = alpha[wsi];
float_type aold = a;
__syncthreads();
float_type local_eps;
int numOfIter = 0;
while (1) {
//select fUp and fLow
if (is_I_up(a, y, Cp, Cn))
f_val2reduce[tid] = f;
else
f_val2reduce[tid] = INFINITY;
int i = get_block_min(f_val2reduce, f_idx2reduce);
float_type up_value = f_val2reduce[i];
kernel_type kIwsI = k_mat_rows[row_len * i + wsi];//K[i, wsi]
__syncthreads();
if (is_I_low(a, y, Cp, Cn))
f_val2reduce[tid] = -f;
else
f_val2reduce[tid] = INFINITY;
int j1 = get_block_min(f_val2reduce, f_idx2reduce);
float_type low_value = -f_val2reduce[j1];
float_type local_diff = low_value - up_value;
if (numOfIter == 0) {
local_eps = max(eps, 0.1f * local_diff);
if (tid == 0) {
diff[0] = local_diff;
}
}
if (numOfIter > max_iter || local_diff < local_eps) {
alpha[wsi] = a;
alpha_diff[tid] = -(a - aold) * y;
diff[1] = numOfIter;
break;
}
__syncthreads();
//select j2 using second order heuristic
if (-up_value > -f && (is_I_low(a, y, Cp, Cn))) {
float_type aIJ = kd[i] + kd[tid] - 2 * kIwsI;
float_type bIJ = -up_value + f;
f_val2reduce[tid] = (-bIJ * bIJ / aIJ);
} else
f_val2reduce[tid] = INFINITY;
int j2 = get_block_min(f_val2reduce, f_idx2reduce);
//update alpha
if (tid == i)
*alpha_i_diff = y > 0 ? Cp - a : a;
if (tid == j2)
*alpha_j_diff = min(y > 0 ? a : Cn - a, (-up_value + f) / (kd[i] + kd[j2] - 2 * kIwsI));
__syncthreads();
float_type l = min(*alpha_i_diff, *alpha_j_diff);
if (tid == i)
a += l * y;
if (tid == j2)
a -= l * y;
//update f
kernel_type kJ2wsI = k_mat_rows[row_len * j2 + wsi];//K[J2, wsi]
f -= l * (kJ2wsI - kIwsI);
numOfIter++;
}
}
__global__ void
nu_smo_solve_kernel(const int *label, float_type *f_values, float_type *alpha, float_type *alpha_diff,
const int *working_set,
int ws_size, float C, const kernel_type *k_mat_rows, const kernel_type *k_mat_diag, int row_len,
float_type eps,
float_type *diff, int max_iter) {
//"row_len" equals to the number of instances in the original training dataset.
//allocate shared memory
extern __shared__ int shared_mem[];
int *f_idx2reduce = shared_mem; //temporary memory for reduction
float_type *f_val2reduce = (float_type *) &shared_mem[ws_size]; //f values used for reduction.
float_type *alpha_i_diff = (float_type *) &shared_mem[ws_size + ws_size * sizeof(float_type) / sizeof(int)]; //delta alpha_i
float_type *alpha_j_diff = &alpha_i_diff[1];
kernel_type *kd = (kernel_type *) &alpha_j_diff[1]; // diagonal elements for kernel matrix
//index, f value and alpha for each instance
int tid = threadIdx.x;
int wsi = working_set[tid];
kd[tid] = k_mat_diag[wsi];
float_type y = label[wsi];
float_type f = f_values[wsi];
float_type a = alpha[wsi];
float_type aold = a;
__syncthreads();
float_type local_eps;
int numOfIter = 0;
while (1) {
//select I_up (y=+1)
if (y > 0 && a < C)
f_val2reduce[tid] = f;
else
f_val2reduce[tid] = INFINITY;
__syncthreads();
int ip = get_block_min(f_val2reduce, f_idx2reduce);
float_type up_value_p = f_val2reduce[ip];
kernel_type kIpwsI = k_mat_rows[row_len * ip + wsi];//K[i, wsi]
__syncthreads();
//select I_up (y=-1)
if (y < 0 && a > 0)
f_val2reduce[tid] = f;
else
f_val2reduce[tid] = INFINITY;
int in = get_block_min(f_val2reduce, f_idx2reduce);
float_type up_value_n = f_val2reduce[in];
kernel_type kInwsI = k_mat_rows[row_len * in + wsi];//K[i, wsi]
__syncthreads();
//select I_low (y=+1)
if (y > 0 && a > 0)
f_val2reduce[tid] = -f;
else
f_val2reduce[tid] = INFINITY;
int j1p = get_block_min(f_val2reduce, f_idx2reduce);
float_type low_value_p = -f_val2reduce[j1p];
__syncthreads();
//select I_low (y=-1)
if (y < 0 && a < C)
f_val2reduce[tid] = -f;
else
f_val2reduce[tid] = INFINITY;
int j1n = get_block_min(f_val2reduce, f_idx2reduce);
float_type low_value_n = -f_val2reduce[j1n];
float_type local_diff = max(low_value_p - up_value_p, low_value_n - up_value_n);
if (numOfIter == 0) {
local_eps = max(eps, 0.1 * local_diff);
if (tid == 0) {
diff[0] = local_diff;
}
}
if (numOfIter > max_iter || local_diff < local_eps) {
alpha[wsi] = a;
alpha_diff[tid] = -(a - aold) * y;
diff[1] = numOfIter;
break;
}
__syncthreads();
//select j2p using second order heuristic
if (-up_value_p > -f && y > 0 && a > 0) {
float_type aIJ = kd[ip] + kd[tid] - 2 * kIpwsI;
float_type bIJ = -up_value_p + f;
f_val2reduce[tid] = -bIJ * bIJ / aIJ;
} else
f_val2reduce[tid] = INFINITY;
int j2p = get_block_min(f_val2reduce, f_idx2reduce);
float_type f_val_j2p = f_val2reduce[j2p];
__syncthreads();
//select j2n using second order heuristic
if (-up_value_n > -f && y < 0 && a < C) {
float_type aIJ = kd[in] + kd[tid] - 2 * kInwsI;
float_type bIJ = -up_value_n + f;
f_val2reduce[tid] = -bIJ * bIJ / aIJ;
} else
f_val2reduce[tid] = INFINITY;
int j2n = get_block_min(f_val2reduce, f_idx2reduce);
int i, j2;
float_type up_value;
kernel_type kIwsI;
if (f_val_j2p < f_val2reduce[j2n]) {
i = ip;
j2 = j2p;
up_value = up_value_p;
kIwsI = kIpwsI;
} else {
i = in;
j2 = j2n;
kIwsI = kInwsI;
up_value = up_value_n;
}
//update alpha
if (tid == i)
*alpha_i_diff = y > 0 ? C - a : a;
if (tid == j2)
*alpha_j_diff = min(y > 0 ? a : C - a, (-up_value + f) / (kd[i] + kd[j2] - 2 * kIwsI));
__syncthreads();
float_type l = min(*alpha_i_diff, *alpha_j_diff);
if (tid == i)
a += l * y;
if (tid == j2)
a -= l * y;
//update f
kernel_type kJ2wsI = k_mat_rows[row_len * j2 + wsi];//K[J2, wsi]
f -= l * (kJ2wsI - kIwsI);
numOfIter++;
}
}
void
c_smo_solve(const SyncArray<int> &y, SyncArray<float_type> &f_val, SyncArray<float_type> &alpha,
SyncArray<float_type> &alpha_diff,
const SyncArray<int> &working_set, float_type Cp, float_type Cn, const SyncArray<kernel_type> &k_mat_rows,
const SyncArray<kernel_type> &k_mat_diag, int row_len, float_type eps, SyncArray<float_type> &diff,
int max_iter) {
size_t ws_size = working_set.size();
size_t smem_size = 0;
smem_size += ws_size * sizeof(int); //f_idx2reduce
smem_size += ws_size * sizeof(float_type); //f_val2reduce
smem_size += ws_size * sizeof(kernel_type); //kd
smem_size += 2 * sizeof(float_type); //alpha diff
c_smo_solve_kernel << < 1, ws_size, smem_size >> >
(y.device_data(), f_val.device_data(), alpha.device_data(), alpha_diff.device_data(),
working_set.device_data(), ws_size, Cp, Cn, k_mat_rows.device_data(), k_mat_diag.device_data(),
row_len, eps, diff.device_data(), max_iter);
}
void nu_smo_solve(const SyncArray<int> &y, SyncArray<float_type> &f_val, SyncArray<float_type> &alpha,
SyncArray<float_type> &alpha_diff,
const SyncArray<int> &working_set, float_type C, const SyncArray<kernel_type> &k_mat_rows,
const SyncArray<kernel_type> &k_mat_diag, int row_len, float_type eps, SyncArray<float_type> &diff,
int max_iter) {
size_t ws_size = working_set.size();
size_t smem_size = 0;
smem_size += ws_size * sizeof(int); //f_idx2reduce
smem_size += ws_size * sizeof(float_type); //f_val2reduce
smem_size += ws_size * sizeof(kernel_type); //kd
smem_size += 2 * sizeof(float_type); //alpha diff
nu_smo_solve_kernel << < 1, ws_size, smem_size >> >
(y.device_data(), f_val.device_data(), alpha.device_data(), alpha_diff.device_data(),
working_set.device_data(), ws_size, C, k_mat_rows.device_data(), k_mat_diag.device_data(),
row_len, eps, diff.device_data(), max_iter);
}
__global__ void
update_f_kernel(float_type *f, int ws_size, const float_type *alpha_diff, const kernel_type *k_mat_rows,
int n_instances) {
//"n_instances" equals to the number of rows of the whole kernel matrix for both SVC and SVR.
KERNEL_LOOP(idx, n_instances) {//one thread to update multiple fvalues.
double sum_diff = 0;
for (int i = 0; i < ws_size; ++i) {
double d = alpha_diff[i];
if (d != 0) {
sum_diff += d * k_mat_rows[i * n_instances + idx];
}
}
f[idx] -= sum_diff;
}
}
void
update_f(SyncArray<float_type> &f, const SyncArray<float_type> &alpha_diff, const SyncArray<kernel_type> &k_mat_rows,
int n_instances) {
SAFE_KERNEL_LAUNCH(update_f_kernel, f.device_data(), alpha_diff.size(), alpha_diff.device_data(),
k_mat_rows.device_data(), n_instances);
}
void sort_f(SyncArray<float_type> &f_val2sort, SyncArray<int> &f_idx2sort) {
thrust::sort_by_key(thrust::cuda::par, f_val2sort.device_data(), f_val2sort.device_data() + f_val2sort.size(),
f_idx2sort.device_data(), thrust::less<float_type>());
}
}
|
0ae3d4a0190bcd50adaebc9f854a12b9a46d8653.hip | // !!! This is a file automatically generated by hipify!!!
#include "utilities.h"
#include "blas.h"
#include "kernels.h"
double flopCounter(int M, int N, int numModels, int *hNVars) {
double flop = 0;
// r = y - mu
flop += (double) M * (double) numModels;
// c = X' * r
flop += 2.0 * (double) M * (double) N * (double) numModels;
// abs(c)
flop += (double) N * (double) numModels;
for (int i = 0; i < numModels; i++) {
// G = X(:, A)' * X(:, A)
flop += 2.0 * (double) hNVars[i] * (double) M * (double) hNVars[i];
// b_OLS = G\(X(:,A)'*y) + rand
flop += 2.0 * (double) M * (double) hNVars[i] + 10.0 * (double) hNVars[i] * (double) hNVars[i];
// Inverse ops for G
flop += (2.0 / 3.0) * (double) hNVars[i] * (double) hNVars[i] * (double) hNVars[i];
// d = X(: , A) * b_OLS - mu
flop += 2.0 * (double) M * (double) hNVars[i] + (double) M;
// gamma_tilde
flop += 2.0 * (double) hNVars[i];
// b update + l1 check
flop += 5.0 * (double) hNVars[i];
// norm1
flop += 2.0 * (double) hNVars[i];
}
// cd = X'*d
flop += 2.0 * (double) M * (double) N * (double) numModels;
// gamma
flop += 6.0 * (double) N * (double) numModels;
// mu update
flop += 2.0 * (double) M * (double) numModels;
// norm2
flop += 3.0 * (double) M * (double) numModels;
// norm2 sqrt, step, sb, err
flop += 4.0 * (double) numModels;
// G
flop += 3.0 * (double) M * (double) numModels;
return flop;
}
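// Illustrative note (not part of the original source): the value returned above is a raw
// floating-point-operation count, so the throughput written to speed.csv later is
//   GFLOP/s = (totalFlop * 1.0e-9) / (elapsed * 1.0e-3)
// where the 1.0e-3 factor assumes the GpuTimer reports milliseconds; e.g. 5.0e12
// accumulated flop over 2000 ms of measured time corresponds to 2500 GFLOP/s.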
using namespace thrust::placeholders;
struct absolute: public thrust::unary_function<precision, precision>
{
__host__ __device__
precision operator()(precision x) {
return abs(x);
}
};
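// cdTransform computes, per inactive column, the two LARS step-length candidates
// (c - cmax)/(cd - cmax) and (c + cmax)/(cd + cmax); zero-correlation columns and
// non-positive candidates are mapped to inf so the later reduce_by_key picks the
// smallest valid gamma for each model.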
struct cdTransform : public thrust::unary_function<thrust::tuple<precision, precision, precision>, precision>
{
__host__ __device__
precision operator()(thrust::tuple<precision, precision, precision> x) {
precision c_val = thrust::get<0>(x);
precision cd_val = thrust::get<1>(x);
precision cmax_val = thrust::get<2>(x);
if (c_val == 0) return inf;
precision val1 = (c_val - cmax_val) / (cd_val - cmax_val);
precision val2 = (c_val + cmax_val) / (cd_val + cmax_val);
val1 = (val1 < eps)? inf: val1;
val2 = (val2 < eps)? inf: val2;
return min(val1, val2);
}
};
void randInit(hiprandGenerator_t &gen, float *&dev, int size) {
hiprandGenerateUniform(gen, dev, size);
}
void randInit(hiprandGenerator_t &gen, double *&dev, int size) {
hiprandGenerateUniformDouble(gen, dev, size);
}
int main(int argc, char *argv[]) {
if (argc < 9) {
printf("Insufficient parameters, required 8! (flatMriPath, numModels, numStreams, max l1, min l2, min g, max vars, max steps)\nInput 0 for a parameter to use it's default value!\n");
return 0;
}
// Reading flattened mri image
precision *X, *Y;
IntegerTuple tuple = read_flat_mri<precision>(argv[1], X, Y);
corr_precision *cX, *cY;
read_flat_mri<corr_precision>(argv[1], cX, cY);
int M = tuple.M, N = tuple.N;
printf("Read FMRI Data X of shape: (%d,%d)\n", M, N);
printf("Read FMRI Data Y of shape: (%d,%d)\n", M, N);
// Number of models to solve in ||l
int numModels = atoi(argv[2]);
numModels = (numModels == 0)? 512: numModels;
int totalModels = N;
printf("Total number of models: %d\n", totalModels);
printf("Number of models in ||l: %d\n", numModels);
int numStreams = atoi(argv[3]);
numStreams = (numStreams == 0)? 8: numStreams;
numStreams = pow(2, int(log(numStreams) / log(2)));
printf("Number of streams: %d\n", numStreams);
precision l1 = atof(argv[4]);
l1 = (l1 == 0)? 1000: l1;
printf("Max L1: %f\n", l1);
precision l2 = atof(argv[5]);
printf("Min L2: %f\n", l2);
precision g = atof(argv[6]);
g = (g == 0)? 0.1: g;
printf("Lambda: %f\n", g);
int maxVariables = atoi(argv[7]);
maxVariables = (maxVariables == 0)? N - 1: maxVariables;
maxVariables = min(min(M, N - 1), maxVariables);
printf("Max Variables: %d\n", maxVariables);
int maxSteps = atoi(argv[8]);
maxSteps = (maxSteps == 0)? 8 * maxVariables: maxSteps;
maxSteps = min(8 * maxVariables, maxSteps);
printf("Max Steps: %d\n", maxSteps);
// Declare all lars variables
int *nVars, *eVars, *step, *lasso, *done, *cidx, *act, *dropidx;
int *info, *infomapper;
int *lVars;
int *hNVars, *hStep, *hdone, *hact, *hLasso, *hDropidx;
precision *cmax, *a1, *a2, *lambda, *gamma;
precision *y, *mu, *r, *betaOls, *d, *gamma_tilde;
precision *beta, *c, *cd, *beta_prev;
precision *rander1, *rander2, *randnrm;
precision alp = 1, bet = 0;
precision *XA[numModels], *XA1[numModels], *G[numModels], *I[numModels], **dXA, **dG, **dI;
precision *ha1, *ha2, *hlambda;
corr_precision corr_alp = 1, corr_bet = 0;
corr_precision *corr_beta, *corr_sb, *corr_y, *corr_tmp, *corr_betaols, *corr_yh, *corr_z;
corr_precision *corr_XA[numModels], *corr_G[numModels], *corr_I[numModels], **corr_dXA, **corr_dG, **corr_dI;
// Initialize all lars variables
init_var<int>(nVars, numModels);
init_var<int>(eVars, numModels);
init_var<int>(step, numModels);
init_var<int>(lasso, numModels);
init_var<int>(done, numModels);
init_var<int>(cidx, numModels);
init_var<int>(act, numModels);
init_var<int>(dropidx, numModels);
init_var<int>(infomapper, numModels);
init_var<int>(info, M * numModels);
init_var<int>(lVars, numModels * M);
hNVars = new int[numModels];
hStep = new int[numModels];
hdone = new int[numModels];
hact = new int[numModels];
hLasso = new int[numModels];
hDropidx = new int[numModels];
init_var<precision>(cmax, numModels);
init_var<precision>(a1, numModels);
init_var<precision>(a2, numModels);
init_var<precision>(lambda, numModels);
init_var<precision>(gamma, numModels);
init_var<precision>(y, numModels * M);
init_var<precision>(mu, numModels * M);
init_var<precision>(r, numModels * M);
init_var<precision>(betaOls, numModels * M);
init_var<precision>(d, numModels * M);
init_var<precision>(gamma_tilde, numModels);
init_var<precision>(beta, numModels * N);
init_var<precision>(c, numModels * N);
init_var<precision>(cd, numModels * N);
init_var<precision>(beta_prev, numModels * N);
init_var<precision>(rander1, numModels * M);
init_var<precision>(rander2, numModels * M);
init_var<precision>(randnrm, numModels);
hiprandGenerator_t gen;
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(gen, 6ULL);
randInit(gen, rander1, numModels * M);
hiprandSetPseudoRandomGeneratorSeed(gen, 66ULL);
randInit(gen, rander2, numModels * M);
init_var<corr_precision>(corr_beta, numModels * M);
init_var<corr_precision>(corr_sb, numModels * M);
init_var<corr_precision>(corr_y, numModels * M);
init_var<corr_precision>(corr_tmp, numModels * M);
init_var<corr_precision>(corr_betaols, numModels * M);
init_var<corr_precision>(corr_yh, numModels * M);
init_var<corr_precision>(corr_z, numModels * M);
ha1 = new precision[numModels];
ha2 = new precision[numModels];
hlambda = new precision[numModels];
for (int i = 0; i < numModels; i++) {
init_var<precision>(XA[i], M * M);
init_var<precision>(XA1[i], M * M);
init_var<precision>(G[i], M * M);
init_var<precision>(I[i], M * M);
init_var<corr_precision>(corr_XA[i], M * M);
init_var<corr_precision>(corr_G[i], M * M);
init_var<corr_precision>(corr_I[i], M * M);
}
hipMalloc(&dXA, numModels * sizeof(precision *));
hipMemcpy(dXA, XA, numModels * sizeof(precision *), hipMemcpyHostToDevice);
hipMalloc(&dG, numModels * sizeof(precision *));
hipMemcpy(dG, G, numModels * sizeof(precision *), hipMemcpyHostToDevice);
hipMalloc(&dI, numModels * sizeof(precision *));
hipMemcpy(dI, I, numModels * sizeof(precision *), hipMemcpyHostToDevice);
hipMalloc(&corr_dXA, numModels * sizeof(corr_precision *));
hipMemcpy(corr_dXA, corr_XA, numModels * sizeof(corr_precision *), hipMemcpyHostToDevice);
hipMalloc(&corr_dG, numModels * sizeof(corr_precision *));
hipMemcpy(corr_dG, corr_G, numModels * sizeof(corr_precision *), hipMemcpyHostToDevice);
hipMalloc(&corr_dI, numModels * sizeof(corr_precision *));
hipMemcpy(corr_dI, corr_I, numModels * sizeof(corr_precision *), hipMemcpyHostToDevice);
precision **batchXA[maxVariables], **batchG[maxVariables], **batchI[maxVariables], **dBatchXA[maxVariables], **dBatchG[maxVariables], **dBatchI[maxVariables];
for (int i = 0; i < maxVariables; i++) {
batchXA[i] = new precision *[numModels];
batchG[i] = new precision *[numModels];
batchI[i] = new precision *[numModels];
hipMalloc(&dBatchXA[i], numModels * sizeof(precision *));
hipMalloc(&dBatchG[i], numModels * sizeof(precision *));
hipMalloc(&dBatchI[i], numModels * sizeof(precision *));
}
int batchLen[maxVariables];
// Initialize thrust variables
thrust::device_ptr<precision> c_ptr = thrust::device_pointer_cast(c);
thrust::device_ptr<precision> cd_ptr = thrust::device_pointer_cast(cd);
thrust::device_ptr<precision> cmax_ptr = thrust::device_pointer_cast(cmax);
thrust::device_ptr<precision> gamma_ptr = thrust::device_pointer_cast(gamma);
thrust::device_ptr<int> cidx_ptr = thrust::device_pointer_cast(cidx);
hipblasHandle_t hnd;
hipblasCreate(&hnd);
hipStream_t streams[numStreams];
for (int i = 0; i < numStreams; i++) hipStreamCreate(&streams[i]);
for (int i = 0; i < numModels; i++)
set_model(Y, y + i * M, mu + i * M, beta + i * N, a1 + i, a2 + i, lambda + i, randnrm + i, nVars + i, eVars + i, lasso + i, step + i, done + i, act + i, M, N, i, streams[i & (numStreams - 1)]);
hipDeviceSynchronize();
GpuTimer timer;
std::ofstream stepf("step.csv"), nvarsf("nvars.csv"), a1f("l1.csv"), a2f("err.csv"), lambdaf("G.csv"), betaf("beta.csv");
int top = numModels;
double totalFlop = 0, corr_flop = 0;
double times[25] = {0};
int e = 0;
int completed_count = 0;
std::map<int, int> completed;
while (true) {
int t = 0;
timer.start();
check(nVars, step, a1, a2, lambda, maxVariables, maxSteps, l1, l2, g, done, numModels);
int ctrl = 0;
hipMemcpy(hdone, done, numModels * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(hact, act, numModels * sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
ctrl = 1;
break;
}
}
if (ctrl) {
hipMemcpy(hStep, step, numModels * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(hNVars, nVars, numModels * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(hact, act, numModels * sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0, s = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
gatherAll(corr_XA[i], corr_y + i * M, cX, lVars + i * M, hNVars[i], M, N, hact[i], streams[s & (numStreams - 1)]);
s++;
}
}
hipDeviceSynchronize();
for (int i = 0, s = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
computeSign(corr_sb + i * M, beta + i * N, beta_prev + i * N, lVars + i * M, dropidx + i, lasso + i, hNVars[i], streams[s & (numStreams - 1)]);
s++;
}
}
hipDeviceSynchronize();
for (int i = 0, s = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
hipblasSetStream(hnd, streams[s & (numStreams - 1)]);
gemm(hnd, HIPBLAS_OP_T, HIPBLAS_OP_N, hNVars[i], hNVars[i], M, &corr_alp, corr_XA[i], M, corr_XA[i], M, &corr_bet, corr_G[i], hNVars[i]);
s++;
}
}
hipDeviceSynchronize();
for (int i = 0, s = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
hipblasSetStream(hnd, streams[s & (numStreams - 1)]);
getrfBatched(hnd, hNVars[i], corr_dG + i, hNVars[i], NULL, info + i, 1);
s++;
}
}
hipDeviceSynchronize();
for (int i = 0, s = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
hipblasSetStream(hnd, streams[s & (numStreams - 1)]);
getriBatched(hnd, hNVars[i], corr_dG + i, hNVars[i], NULL, corr_dI + i, hNVars[i], info + i, 1);
s++;
}
}
hipDeviceSynchronize();
for (int i = 0, s = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
hipblasSetStream(hnd, streams[s & (numStreams - 1)]);
gemv(hnd, HIPBLAS_OP_T, M, hNVars[i], &corr_alp, corr_XA[i], M, corr_y + i * M, 1, &corr_bet, corr_tmp + i * M, 1);
s++;
}
}
hipDeviceSynchronize();
for (int i = 0, s = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
hipblasSetStream(hnd, streams[s & (numStreams - 1)]);
gemv(hnd, HIPBLAS_OP_N, hNVars[i], hNVars[i], &corr_alp, corr_I[i], hNVars[i], corr_tmp + i * M, 1, &corr_bet, corr_betaols + i * M, 1);
s++;
}
}
hipDeviceSynchronize();
for (int i = 0, s = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
hipblasSetStream(hnd, streams[s & (numStreams - 1)]);
gemv(hnd, HIPBLAS_OP_N, M, hNVars[i], &corr_alp, corr_XA[i], M, corr_betaols + i * M, 1, &corr_bet, corr_yh + i * M, 1);
s++;
}
}
hipDeviceSynchronize();
for (int i = 0, s = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
hipblasSetStream(hnd, streams[s & (numStreams - 1)]);
gemv(hnd, HIPBLAS_OP_N, hNVars[i], hNVars[i], &corr_alp, corr_I[i], hNVars[i], corr_sb + i * M, 1, &corr_bet, corr_z + i * M, 1);
s++;
}
}
hipDeviceSynchronize();
for (int i = 0, s = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
correct(corr_beta + i * M, corr_betaols + i * M, corr_sb + i * M, corr_y + i * M, corr_yh + i * M, corr_z + i * M, a1 + i, a2 + i, lambda + i, l2, g, hNVars[i], M, streams[s & (numStreams - 1)]);
s++;
}
}
hipDeviceSynchronize();
hipMemcpy(ha1, a1, numModels * sizeof(precision), hipMemcpyDeviceToHost);
hipMemcpy(ha2, a2, numModels * sizeof(precision), hipMemcpyDeviceToHost);
hipMemcpy(hlambda, lambda, numModels * sizeof(precision), hipMemcpyDeviceToHost);
for (int i = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
corr_flop += 2.0 * (double) hNVars[i] * (double) M * (double) hNVars[i];
corr_flop += 2.0 * (double) M * (double) hNVars[i] + 2.0 * (double) hNVars[i] * (double) hNVars[i];
corr_flop += (2.0 / 3.0) * (double) hNVars[i] * (double) hNVars[i] * (double) hNVars[i];
corr_flop += 2.0 * (double) M * (double) hNVars[i] + (double) M + 2.0 * (double) M * (double) M;
corr_flop += 2.0 * (double) hNVars[i] * (double) hNVars[i] + 2.0 * (double) hNVars[i] * (double) hNVars[i];
corr_flop += 4.0 * (double) hNVars[i] + 11;
completed[hact[i]] = 1;
completed_count++;
stepf << hact[i] << ", " << hStep[i] << "\n";
nvarsf << hact[i] << ", " << hNVars[i] << "\n";
a1f << hact[i] << ", " << ha1[i] << "\n";
a2f << hact[i] << ", " << ha2[i] << "\n";
lambdaf << hact[i] << ", " << hlambda[i] << "\n";
int hlVars[hNVars[i]];
corr_precision hbeta[hNVars[i]];
hipMemcpy(hlVars, lVars + i * M, hNVars[i] * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(hbeta, corr_beta + i * M, hNVars[i] * sizeof(corr_precision), hipMemcpyDeviceToHost);
for (int j = 0; j < hNVars[i]; j++) betaf << hact[i] << ", " << hlVars[j] << ", " << hbeta[j] << "\n";
}
}
for (int i = 0, s = 0; i < numModels && top < totalModels; i++) {
if (hdone[i] && completed[hact[i]]) {
set_model(Y, y + i * M, mu + i * M, beta + i * N, a1 + i, a2 + i, lambda + i, randnrm + i, nVars + i, eVars + i, lasso + i, step + i, done + i, act + i, M, N, top++, streams[i & (numStreams - 1)]);
s++;
hdone[i] = 0;
}
}
hipDeviceSynchronize();
}
printf("\rCompleted %d models", completed_count);
if (completed_count == totalModels) {
break;
}
timer.stop();
times[t++] += timer.elapsed();
timer.start();
drop(lVars, dropidx, nVars, lasso, M, numModels);
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
mat_sub(y, mu, r, numModels * M);
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
hipblasSetStream(hnd, NULL);
gemm(hnd, HIPBLAS_OP_N, HIPBLAS_OP_N, N, numModels, M, &alp, X, N, r, M, &bet, c, N);
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
exclude(c, lVars, nVars, eVars, act, M, N, numModels, 0);
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
thrust::reduce_by_key(
thrust::make_transform_iterator(thrust::make_counting_iterator((int) 0), _1 / N),
thrust::make_transform_iterator(thrust::make_counting_iterator((int) 0), _1 / N) + numModels * N,
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_transform_iterator(c_ptr, absolute()),
thrust::make_transform_iterator(thrust::make_counting_iterator((int) 0), _1 % N)
)
),
thrust::make_discard_iterator(),
thrust::make_zip_iterator(thrust::make_tuple(cmax_ptr, cidx_ptr)),
thrust::equal_to<int>(),
thrust::maximum<thrust::tuple<precision, int> >()
);
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
lasso_add(c, lasso, lVars, nVars, cidx, M, N, numModels);
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
hipMemcpy(hNVars, nVars, numModels * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(hLasso, lasso, numModels * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(hDropidx, dropidx, numModels * sizeof(int), hipMemcpyDeviceToHost);
int maxVar = hNVars[0];
int hinfomapper[numModels];
for (int i = 0; i < maxVariables; i++) batchLen[i] = 0;
for (int i = 0; i < numModels; i++) {
if (hNVars[i] > maxVar) maxVar = hNVars[i];
batchXA[hNVars[i]][batchLen[hNVars[i]]] = XA[i];
batchG[hNVars[i]][batchLen[hNVars[i]]] = G[i];
batchI[hNVars[i]][batchLen[hNVars[i]]] = I[i];
hinfomapper[i] = hNVars[i] * numModels + batchLen[hNVars[i]];
batchLen[hNVars[i]]++;
}
hipMemcpy(infomapper, hinfomapper, numModels * sizeof(int), hipMemcpyHostToDevice);
for (int i = 0; i < maxVariables; i++) {
if (batchLen[i] > 0) {
hipMemcpy(dBatchXA[i], batchXA[i], batchLen[i] * sizeof(precision *), hipMemcpyHostToDevice);
hipMemcpy(dBatchG[i], batchG[i], batchLen[i] * sizeof(precision *), hipMemcpyHostToDevice);
hipMemcpy(dBatchI[i], batchI[i], batchLen[i] * sizeof(precision *), hipMemcpyHostToDevice);
}
}
hipDeviceSynchronize();
timer.stop();
times[0] += timer.elapsed();
timer.start();
for (int i = 0, s = 0; i < numModels; i++) {
gather(XA[i], XA1[i], X, lVars, hNVars[i], hLasso[i], hDropidx[i], M, N, i, streams[s & (numStreams - 1)]);
s++;
}
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
for (int i = 0, s = 0; i < maxVariables; i++) {
if (batchLen[i] > 0) {
hipblasSetStream(hnd, streams[s & (numStreams - 1)]);
gemmBatched(hnd, HIPBLAS_OP_T, HIPBLAS_OP_N, i, i, M, &alp, dBatchXA[i], M, dBatchXA[i], M, &bet, dBatchG[i], i, batchLen[i]);
s++;
}
}
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
XAyBatched(dXA, y, r, nVars, M, numModels);
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
for (int i = 0, s = 0; i < maxVariables; i++) {
if (batchLen[i] > 0) {
hipblasSetStream(hnd, streams[s & (numStreams - 1)]);
getrfBatched(hnd, i, dBatchG[i], i, NULL, info + i * numModels, batchLen[i]);
s++;
}
}
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
for (int i = 0, s = 0; i < maxVariables; i++) {
if (batchLen[i] > 0) {
hipblasSetStream(hnd, streams[s & (numStreams - 1)]);
getriBatched(hnd, i, dBatchG[i], i, NULL, dBatchI[i], i, info + i * numModels, batchLen[i]);
s++;
}
}
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
IrBatched(dI, r, betaOls, nVars, M, numModels, maxVar);
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
IrBatched(dI, rander1, r, nVars, M, numModels, maxVar);
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
IrBatched(dI, rander2, d, nVars, M, numModels, maxVar);
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
checkNan(nVars, eVars, lVars, info, infomapper, r, d, randnrm, M, numModels);
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
XAbetaOlsBatched(dXA, betaOls, d, nVars, M, numModels, maxVar);
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
mat_sub(d, mu, d, numModels * M);
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
gammat(gamma_tilde, beta, betaOls, dropidx, lVars, nVars, lasso, M, N, numModels);
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
hipblasSetStream(hnd, NULL);
gemm(hnd, HIPBLAS_OP_N, HIPBLAS_OP_N, N, numModels, M, &alp, X, N, d, M, &bet, cd, N);
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
thrust::reduce_by_key(
thrust::make_transform_iterator(thrust::make_counting_iterator((int) 0), _1 / N),
thrust::make_transform_iterator(thrust::make_counting_iterator((int) 0), _1 / N) + numModels * N,
thrust::make_transform_iterator(
thrust::make_zip_iterator(
thrust::make_tuple(
c_ptr,
cd_ptr,
thrust::make_permutation_iterator(
cmax_ptr,
thrust::make_transform_iterator(thrust::make_counting_iterator((int) 0), _1 / N)
)
)
),
cdTransform()
),
thrust::make_discard_iterator(),
gamma_ptr,
thrust::equal_to<int>(),
thrust::minimum<precision>()
);
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
set_gamma(gamma, gamma_tilde, lasso, nVars, maxVariables, M, numModels);
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
update(beta, beta_prev, mu, d, betaOls, gamma, dXA, y, a1, a2, lambda, lVars, nVars, step, infomapper, M, N, numModels, l1);
hipDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
totalFlop += flopCounter(M, N, numModels, hNVars);
e++;
}
stepf.close();
nvarsf.close();
a1f.close();
a2f.close();
lambdaf.close();
betaf.close();
// Statistics
double transferTime = times[0];
double execTime = 0;
for (int i = 1; i < 25; i++) execTime += times[i];
printf("\n");
std::ofstream speedf("speed.csv");
for (int i = 0; i < 25; i++) {
speedf << i << ", " << times[i] << "\n";
}
speedf << (corr_flop * 1.0e-9) / (transferTime * 1.0e-3) << ", " << (totalFlop * 1.0e-9) / (execTime * 1.0e-3) << "\n";
speedf.close();
hipFree(X);
hipFree(Y);
hipFree(cX);
hipFree(cY);
hipFree(nVars);
hipFree(eVars);
hipFree(step);
hipFree(lasso);
hipFree(done);
hipFree(cidx);
hipFree(act);
hipFree(dropidx);
hipFree(infomapper);
hipFree(info);
hipFree(lVars);
hipFree(cmax);
hipFree(a1);
hipFree(a2);
hipFree(lambda);
hipFree(gamma);
hipFree(y);
hipFree(mu);
hipFree(r);
hipFree(betaOls);
hipFree(d);
hipFree(gamma_tilde);
hipFree(beta);
hipFree(c);
hipFree(cd);
hipFree(beta_prev);
hipFree(rander1);
hipFree(rander2);
hipFree(randnrm);
hipFree(corr_beta);
hipFree(corr_sb);
hipFree(corr_y);
hipFree(corr_tmp);
hipFree(corr_betaols);
hipFree(corr_yh);
hipFree(corr_z);
for (int i = 0; i < numModels; i++) {
hipFree(XA[i]);
hipFree(XA1[i]);
hipFree(G[i]);
hipFree(I[i]);
hipFree(corr_XA[i]);
hipFree(corr_G[i]);
hipFree(corr_I[i]);
}
for (int i = 0; i < maxVariables; i++) {
hipFree(dBatchXA[i]);
hipFree(dBatchG[i]);
hipFree(dBatchI[i]);
}
for (int i = 0; i < numStreams; i++) hipStreamDestroy(streams[i]);
hipFree(dXA);
hipFree(dG);
hipFree(dI);
hipFree(corr_dXA);
hipFree(corr_dG);
hipFree(corr_dI);
hipblasDestroy(hnd);
return 0;
}
| 0ae3d4a0190bcd50adaebc9f854a12b9a46d8653.cu | #include "utilities.h"
#include "blas.h"
#include "kernels.h"
double flopCounter(int M, int N, int numModels, int *hNVars) {
double flop = 0;
// r = y - mu
flop += (double) M * (double) numModels;
// c = X' * r
flop += 2.0 * (double) M * (double) N * (double) numModels;
// abs(c)
flop += (double) N * (double) numModels;
for (int i = 0; i < numModels; i++) {
// G = X(:, A)' * X(:, A)
flop += 2.0 * (double) hNVars[i] * (double) M * (double) hNVars[i];
// b_OLS = G\(X(:,A)'*y) + rand
flop += 2.0 * (double) M * (double) hNVars[i] + 10.0 * (double) hNVars[i] * (double) hNVars[i];
// Inverse ops for G
flop += (2.0 / 3.0) * (double) hNVars[i] * (double) hNVars[i] * (double) hNVars[i];
// d = X(: , A) * b_OLS - mu
flop += 2.0 * (double) M * (double) hNVars[i] + (double) M;
// gamma_tilde
flop += 2.0 * (double) hNVars[i];
// b update + l1 check
flop += 5.0 * (double) hNVars[i];
// norm1
flop += 2.0 * (double) hNVars[i];
}
// cd = X'*d
flop += 2.0 * (double) M * (double) N * (double) numModels;
// gamma
flop += 6.0 * (double) N * (double) numModels;
// mu update
flop += 2.0 * (double) M * (double) numModels;
// norm2
flop += 3.0 * (double) M * (double) numModels;
// norm2 sqrt, step, sb, err
flop += 4.0 * (double) numModels;
// G
flop += 3.0 * (double) M * (double) numModels;
return flop;
}
using namespace thrust::placeholders;
struct absolute: public thrust::unary_function<precision, precision>
{
__host__ __device__
precision operator()(precision x) {
return abs(x);
}
};
struct cdTransform : public thrust::unary_function<thrust::tuple<precision, precision, precision>, precision>
{
__host__ __device__
precision operator()(thrust::tuple<precision, precision, precision> x) {
precision c_val = thrust::get<0>(x);
precision cd_val = thrust::get<1>(x);
precision cmax_val = thrust::get<2>(x);
if (c_val == 0) return inf;
precision val1 = (c_val - cmax_val) / (cd_val - cmax_val);
precision val2 = (c_val + cmax_val) / (cd_val + cmax_val);
val1 = (val1 < eps)? inf: val1;
val2 = (val2 < eps)? inf: val2;
return min(val1, val2);
}
};
void randInit(curandGenerator_t &gen, float *&dev, int size) {
curandGenerateUniform(gen, dev, size);
}
void randInit(curandGenerator_t &gen, double *&dev, int size) {
curandGenerateUniformDouble(gen, dev, size);
}
int main(int argc, char *argv[]) {
if (argc < 9) {
printf("Insufficient parameters, required 8! (flatMriPath, numModels, numStreams, max l1, min l2, min g, max vars, max steps)\nInput 0 for a parameter to use it's default value!\n");
return 0;
}
// Reading flattened mri image
precision *X, *Y;
IntegerTuple tuple = read_flat_mri<precision>(argv[1], X, Y);
corr_precision *cX, *cY;
read_flat_mri<corr_precision>(argv[1], cX, cY);
int M = tuple.M, N = tuple.N;
printf("Read FMRI Data X of shape: (%d,%d)\n", M, N);
printf("Read FMRI Data Y of shape: (%d,%d)\n", M, N);
// Number of models to solve in ||l
int numModels = atoi(argv[2]);
numModels = (numModels == 0)? 512: numModels;
int totalModels = N;
printf("Total number of models: %d\n", totalModels);
printf("Number of models in ||l: %d\n", numModels);
int numStreams = atoi(argv[3]);
numStreams = (numStreams == 0)? 8: numStreams;
numStreams = pow(2, int(log(numStreams) / log(2)));
printf("Number of streams: %d\n", numStreams);
precision l1 = atof(argv[4]);
l1 = (l1 == 0)? 1000: l1;
printf("Max L1: %f\n", l1);
precision l2 = atof(argv[5]);
printf("Min L2: %f\n", l2);
precision g = atof(argv[6]);
g = (g == 0)? 0.1: g;
printf("Lambda: %f\n", g);
int maxVariables = atoi(argv[7]);
maxVariables = (maxVariables == 0)? N - 1: maxVariables;
maxVariables = min(min(M, N - 1), maxVariables);
printf("Max Variables: %d\n", maxVariables);
int maxSteps = atoi(argv[8]);
maxSteps = (maxSteps == 0)? 8 * maxVariables: maxSteps;
maxSteps = min(8 * maxVariables, maxSteps);
printf("Max Steps: %d\n", maxSteps);
// Declare all lars variables
int *nVars, *eVars, *step, *lasso, *done, *cidx, *act, *dropidx;
int *info, *infomapper;
int *lVars;
int *hNVars, *hStep, *hdone, *hact, *hLasso, *hDropidx;
precision *cmax, *a1, *a2, *lambda, *gamma;
precision *y, *mu, *r, *betaOls, *d, *gamma_tilde;
precision *beta, *c, *cd, *beta_prev;
precision *rander1, *rander2, *randnrm;
precision alp = 1, bet = 0;
precision *XA[numModels], *XA1[numModels], *G[numModels], *I[numModels], **dXA, **dG, **dI;
precision *ha1, *ha2, *hlambda;
corr_precision corr_alp = 1, corr_bet = 0;
corr_precision *corr_beta, *corr_sb, *corr_y, *corr_tmp, *corr_betaols, *corr_yh, *corr_z;
corr_precision *corr_XA[numModels], *corr_G[numModels], *corr_I[numModels], **corr_dXA, **corr_dG, **corr_dI;
// Initialize all lars variables
init_var<int>(nVars, numModels);
init_var<int>(eVars, numModels);
init_var<int>(step, numModels);
init_var<int>(lasso, numModels);
init_var<int>(done, numModels);
init_var<int>(cidx, numModels);
init_var<int>(act, numModels);
init_var<int>(dropidx, numModels);
init_var<int>(infomapper, numModels);
init_var<int>(info, M * numModels);
init_var<int>(lVars, numModels * M);
hNVars = new int[numModels];
hStep = new int[numModels];
hdone = new int[numModels];
hact = new int[numModels];
hLasso = new int[numModels];
hDropidx = new int[numModels];
init_var<precision>(cmax, numModels);
init_var<precision>(a1, numModels);
init_var<precision>(a2, numModels);
init_var<precision>(lambda, numModels);
init_var<precision>(gamma, numModels);
init_var<precision>(y, numModels * M);
init_var<precision>(mu, numModels * M);
init_var<precision>(r, numModels * M);
init_var<precision>(betaOls, numModels * M);
init_var<precision>(d, numModels * M);
init_var<precision>(gamma_tilde, numModels);
init_var<precision>(beta, numModels * N);
init_var<precision>(c, numModels * N);
init_var<precision>(cd, numModels * N);
init_var<precision>(beta_prev, numModels * N);
init_var<precision>(rander1, numModels * M);
init_var<precision>(rander2, numModels * M);
init_var<precision>(randnrm, numModels);
curandGenerator_t gen;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(gen, 6ULL);
randInit(gen, rander1, numModels * M);
curandSetPseudoRandomGeneratorSeed(gen, 66ULL);
randInit(gen, rander2, numModels * M);
init_var<corr_precision>(corr_beta, numModels * M);
init_var<corr_precision>(corr_sb, numModels * M);
init_var<corr_precision>(corr_y, numModels * M);
init_var<corr_precision>(corr_tmp, numModels * M);
init_var<corr_precision>(corr_betaols, numModels * M);
init_var<corr_precision>(corr_yh, numModels * M);
init_var<corr_precision>(corr_z, numModels * M);
ha1 = new precision[numModels];
ha2 = new precision[numModels];
hlambda = new precision[numModels];
for (int i = 0; i < numModels; i++) {
init_var<precision>(XA[i], M * M);
init_var<precision>(XA1[i], M * M);
init_var<precision>(G[i], M * M);
init_var<precision>(I[i], M * M);
init_var<corr_precision>(corr_XA[i], M * M);
init_var<corr_precision>(corr_G[i], M * M);
init_var<corr_precision>(corr_I[i], M * M);
}
cudaMalloc(&dXA, numModels * sizeof(precision *));
cudaMemcpy(dXA, XA, numModels * sizeof(precision *), cudaMemcpyHostToDevice);
cudaMalloc(&dG, numModels * sizeof(precision *));
cudaMemcpy(dG, G, numModels * sizeof(precision *), cudaMemcpyHostToDevice);
cudaMalloc(&dI, numModels * sizeof(precision *));
cudaMemcpy(dI, I, numModels * sizeof(precision *), cudaMemcpyHostToDevice);
cudaMalloc(&corr_dXA, numModels * sizeof(corr_precision *));
cudaMemcpy(corr_dXA, corr_XA, numModels * sizeof(corr_precision *), cudaMemcpyHostToDevice);
cudaMalloc(&corr_dG, numModels * sizeof(corr_precision *));
cudaMemcpy(corr_dG, corr_G, numModels * sizeof(corr_precision *), cudaMemcpyHostToDevice);
cudaMalloc(&corr_dI, numModels * sizeof(corr_precision *));
cudaMemcpy(corr_dI, corr_I, numModels * sizeof(corr_precision *), cudaMemcpyHostToDevice);
precision **batchXA[maxVariables], **batchG[maxVariables], **batchI[maxVariables], **dBatchXA[maxVariables], **dBatchG[maxVariables], **dBatchI[maxVariables];
for (int i = 0; i < maxVariables; i++) {
batchXA[i] = new precision *[numModels];
batchG[i] = new precision *[numModels];
batchI[i] = new precision *[numModels];
cudaMalloc(&dBatchXA[i], numModels * sizeof(precision *));
cudaMalloc(&dBatchG[i], numModels * sizeof(precision *));
cudaMalloc(&dBatchI[i], numModels * sizeof(precision *));
}
int batchLen[maxVariables];
// Initialize thrust variables
thrust::device_ptr<precision> c_ptr = thrust::device_pointer_cast(c);
thrust::device_ptr<precision> cd_ptr = thrust::device_pointer_cast(cd);
thrust::device_ptr<precision> cmax_ptr = thrust::device_pointer_cast(cmax);
thrust::device_ptr<precision> gamma_ptr = thrust::device_pointer_cast(gamma);
thrust::device_ptr<int> cidx_ptr = thrust::device_pointer_cast(cidx);
cublasHandle_t hnd;
cublasCreate(&hnd);
cudaStream_t streams[numStreams];
for (int i = 0; i < numStreams; i++) cudaStreamCreate(&streams[i]);
for (int i = 0; i < numModels; i++)
set_model(Y, y + i * M, mu + i * M, beta + i * N, a1 + i, a2 + i, lambda + i, randnrm + i, nVars + i, eVars + i, lasso + i, step + i, done + i, act + i, M, N, i, streams[i & (numStreams - 1)]);
cudaDeviceSynchronize();
GpuTimer timer;
std::ofstream stepf("step.csv"), nvarsf("nvars.csv"), a1f("l1.csv"), a2f("err.csv"), lambdaf("G.csv"), betaf("beta.csv");
int top = numModels;
double totalFlop = 0, corr_flop = 0;
double times[25] = {0};
int e = 0;
int completed_count = 0;
std::map<int, int> completed;
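// Main solver loop: each iteration advances every model in the batch by one
// LARS step. Roughly: flag models that hit a stopping rule and redo their
// final coefficients with the corr_ (correction) buffers before swapping in a
// fresh model; drop variables if required; form r = y - mu and c = X^T r;
// pick the most correlated inactive column per model; rebuild XA and
// G = XA^T XA with batched LU factorization/inversion; then compute betaOls,
// the step length gamma, and update beta, mu and the stopping statistics.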
while (true) {
int t = 0;
timer.start();
check(nVars, step, a1, a2, lambda, maxVariables, maxSteps, l1, l2, g, done, numModels);
int ctrl = 0;
cudaMemcpy(hdone, done, numModels * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(hact, act, numModels * sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
ctrl = 1;
break;
}
}
if (ctrl) {
cudaMemcpy(hStep, step, numModels * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(hNVars, nVars, numModels * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(hact, act, numModels * sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0, s = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
gatherAll(corr_XA[i], corr_y + i * M, cX, lVars + i * M, hNVars[i], M, N, hact[i], streams[s & (numStreams - 1)]);
s++;
}
}
cudaDeviceSynchronize();
for (int i = 0, s = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
computeSign(corr_sb + i * M, beta + i * N, beta_prev + i * N, lVars + i * M, dropidx + i, lasso + i, hNVars[i], streams[s & (numStreams - 1)]);
s++;
}
}
cudaDeviceSynchronize();
for (int i = 0, s = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
cublasSetStream(hnd, streams[s & (numStreams - 1)]);
gemm(hnd, CUBLAS_OP_T, CUBLAS_OP_N, hNVars[i], hNVars[i], M, &corr_alp, corr_XA[i], M, corr_XA[i], M, &corr_bet, corr_G[i], hNVars[i]);
s++;
}
}
cudaDeviceSynchronize();
for (int i = 0, s = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
cublasSetStream(hnd, streams[s & (numStreams - 1)]);
getrfBatched(hnd, hNVars[i], corr_dG + i, hNVars[i], NULL, info + i, 1);
s++;
}
}
cudaDeviceSynchronize();
for (int i = 0, s = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
cublasSetStream(hnd, streams[s & (numStreams - 1)]);
getriBatched(hnd, hNVars[i], corr_dG + i, hNVars[i], NULL, corr_dI + i, hNVars[i], info + i, 1);
s++;
}
}
cudaDeviceSynchronize();
for (int i = 0, s = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
cublasSetStream(hnd, streams[s & (numStreams - 1)]);
gemv(hnd, CUBLAS_OP_T, M, hNVars[i], &corr_alp, corr_XA[i], M, corr_y + i * M, 1, &corr_bet, corr_tmp + i * M, 1);
s++;
}
}
cudaDeviceSynchronize();
for (int i = 0, s = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
cublasSetStream(hnd, streams[s & (numStreams - 1)]);
gemv(hnd, CUBLAS_OP_N, hNVars[i], hNVars[i], &corr_alp, corr_I[i], hNVars[i], corr_tmp + i * M, 1, &corr_bet, corr_betaols + i * M, 1);
s++;
}
}
cudaDeviceSynchronize();
for (int i = 0, s = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
cublasSetStream(hnd, streams[s & (numStreams - 1)]);
gemv(hnd, CUBLAS_OP_N, M, hNVars[i], &corr_alp, corr_XA[i], M, corr_betaols + i * M, 1, &corr_bet, corr_yh + i * M, 1);
s++;
}
}
cudaDeviceSynchronize();
for (int i = 0, s = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
cublasSetStream(hnd, streams[s & (numStreams - 1)]);
gemv(hnd, CUBLAS_OP_N, hNVars[i], hNVars[i], &corr_alp, corr_I[i], hNVars[i], corr_sb + i * M, 1, &corr_bet, corr_z + i * M, 1);
s++;
}
}
cudaDeviceSynchronize();
for (int i = 0, s = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
correct(corr_beta + i * M, corr_betaols + i * M, corr_sb + i * M, corr_y + i * M, corr_yh + i * M, corr_z + i * M, a1 + i, a2 + i, lambda + i, l2, g, hNVars[i], M, streams[s & (numStreams - 1)]);
s++;
}
}
cudaDeviceSynchronize();
cudaMemcpy(ha1, a1, numModels * sizeof(precision), cudaMemcpyDeviceToHost);
cudaMemcpy(ha2, a2, numModels * sizeof(precision), cudaMemcpyDeviceToHost);
cudaMemcpy(hlambda, lambda, numModels * sizeof(precision), cudaMemcpyDeviceToHost);
for (int i = 0; i < numModels; i++) {
if (hdone[i] && !completed[hact[i]]) {
corr_flop += 2.0 * (double) hNVars[i] * (double) M * (double) hNVars[i];
corr_flop += 2.0 * (double) M * (double) hNVars[i] + 2.0 * (double) hNVars[i] * (double) hNVars[i];
corr_flop += (2.0 / 3.0) * (double) hNVars[i] * (double) hNVars[i] * (double) hNVars[i];
corr_flop += 2.0 * (double) M * (double) hNVars[i] + (double) M + 2.0 * (double) M * (double) M;
corr_flop += 2.0 * (double) hNVars[i] * (double) hNVars[i] + 2.0 * (double) hNVars[i] * (double) hNVars[i];
corr_flop += 4.0 * (double) hNVars[i] + 11;
completed[hact[i]] = 1;
completed_count++;
stepf << hact[i] << ", " << hStep[i] << "\n";
nvarsf << hact[i] << ", " << hNVars[i] << "\n";
a1f << hact[i] << ", " << ha1[i] << "\n";
a2f << hact[i] << ", " << ha2[i] << "\n";
lambdaf << hact[i] << ", " << hlambda[i] << "\n";
int hlVars[hNVars[i]];
corr_precision hbeta[hNVars[i]];
cudaMemcpy(hlVars, lVars + i * M, hNVars[i] * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(hbeta, corr_beta + i * M, hNVars[i] * sizeof(corr_precision), cudaMemcpyDeviceToHost);
for (int j = 0; j < hNVars[i]; j++) betaf << hact[i] << ", " << hlVars[j] << ", " << hbeta[j] << "\n";
}
}
for (int i = 0, s = 0; i < numModels && top < totalModels; i++) {
if (hdone[i] && completed[hact[i]]) {
set_model(Y, y + i * M, mu + i * M, beta + i * N, a1 + i, a2 + i, lambda + i, randnrm + i, nVars + i, eVars + i, lasso + i, step + i, done + i, act + i, M, N, top++, streams[i & (numStreams - 1)]);
s++;
hdone[i] = 0;
}
}
cudaDeviceSynchronize();
}
printf("\rCompleted %d models", completed_count);
if (completed_count == totalModels) {
break;
}
timer.stop();
times[t++] += timer.elapsed();
timer.start();
drop(lVars, dropidx, nVars, lasso, M, numModels);
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
mat_sub(y, mu, r, numModels * M);
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
cublasSetStream(hnd, NULL);
gemm(hnd, CUBLAS_OP_N, CUBLAS_OP_N, N, numModels, M, &alp, X, N, r, M, &bet, c, N);
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
exclude(c, lVars, nVars, eVars, act, M, N, numModels, 0);
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
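// Segmented reduction keyed by element_index / N (the model id): for every
// model this finds the maximum |c| over its N correlations together with the
// column index where it occurs, written to cmax and cidx respectively.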
thrust::reduce_by_key(
thrust::make_transform_iterator(thrust::make_counting_iterator((int) 0), _1 / N),
thrust::make_transform_iterator(thrust::make_counting_iterator((int) 0), _1 / N) + numModels * N,
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_transform_iterator(c_ptr, absolute()),
thrust::make_transform_iterator(thrust::make_counting_iterator((int) 0), _1 % N)
)
),
thrust::make_discard_iterator(),
thrust::make_zip_iterator(thrust::make_tuple(cmax_ptr, cidx_ptr)),
thrust::equal_to<int>(),
thrust::maximum<thrust::tuple<precision, int> >()
);
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
lasso_add(c, lasso, lVars, nVars, cidx, M, N, numModels);
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
cudaMemcpy(hNVars, nVars, numModels * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(hLasso, lasso, numModels * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(hDropidx, dropidx, numModels * sizeof(int), cudaMemcpyDeviceToHost);
int maxVar = hNVars[0];
int hinfomapper[numModels];
for (int i = 0; i < maxVariables; i++) batchLen[i] = 0;
for (int i = 0; i < numModels; i++) {
if (hNVars[i] > maxVar) maxVar = hNVars[i];
batchXA[hNVars[i]][batchLen[hNVars[i]]] = XA[i];
batchG[hNVars[i]][batchLen[hNVars[i]]] = G[i];
batchI[hNVars[i]][batchLen[hNVars[i]]] = I[i];
hinfomapper[i] = hNVars[i] * numModels + batchLen[hNVars[i]];
batchLen[hNVars[i]]++;
}
cudaMemcpy(infomapper, hinfomapper, numModels * sizeof(int), cudaMemcpyHostToDevice);
for (int i = 0; i < maxVariables; i++) {
if (batchLen[i] > 0) {
cudaMemcpy(dBatchXA[i], batchXA[i], batchLen[i] * sizeof(precision *), cudaMemcpyHostToDevice);
cudaMemcpy(dBatchG[i], batchG[i], batchLen[i] * sizeof(precision *), cudaMemcpyHostToDevice);
cudaMemcpy(dBatchI[i], batchI[i], batchLen[i] * sizeof(precision *), cudaMemcpyHostToDevice);
}
}
cudaDeviceSynchronize();
timer.stop();
times[0] += timer.elapsed();
timer.start();
for (int i = 0, s = 0; i < numModels; i++) {
gather(XA[i], XA1[i], X, lVars, hNVars[i], hLasso[i], hDropidx[i], M, N, i, streams[s & (numStreams - 1)]);
s++;
}
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
for (int i = 0, s = 0; i < maxVariables; i++) {
if (batchLen[i] > 0) {
cublasSetStream(hnd, streams[s & (numStreams - 1)]);
gemmBatched(hnd, CUBLAS_OP_T, CUBLAS_OP_N, i, i, M, &alp, dBatchXA[i], M, dBatchXA[i], M, &bet, dBatchG[i], i, batchLen[i]);
s++;
}
}
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
XAyBatched(dXA, y, r, nVars, M, numModels);
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
for (int i = 0, s = 0; i < maxVariables; i++) {
if (batchLen[i] > 0) {
cublasSetStream(hnd, streams[s & (numStreams - 1)]);
getrfBatched(hnd, i, dBatchG[i], i, NULL, info + i * numModels, batchLen[i]);
s++;
}
}
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
for (int i = 0, s = 0; i < maxVariables; i++) {
if (batchLen[i] > 0) {
cublasSetStream(hnd, streams[s & (numStreams - 1)]);
getriBatched(hnd, i, dBatchG[i], i, NULL, dBatchI[i], i, info + i * numModels, batchLen[i]);
s++;
}
}
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
IrBatched(dI, r, betaOls, nVars, M, numModels, maxVar);
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
IrBatched(dI, rander1, r, nVars, M, numModels, maxVar);
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
IrBatched(dI, rander2, d, nVars, M, numModels, maxVar);
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
checkNan(nVars, eVars, lVars, info, infomapper, r, d, randnrm, M, numModels);
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
XAbetaOlsBatched(dXA, betaOls, d, nVars, M, numModels, maxVar);
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
mat_sub(d, mu, d, numModels * M);
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
gammat(gamma_tilde, beta, betaOls, dropidx, lVars, nVars, lasso, M, N, numModels);
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
cublasSetStream(hnd, NULL);
gemm(hnd, CUBLAS_OP_N, CUBLAS_OP_N, N, numModels, M, &alp, X, N, d, M, &bet, cd, N);
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
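// Per-model minimum of the cdTransform step-length candidates over the N
// coefficients; the result written to gamma is this iteration's LARS step.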
thrust::reduce_by_key(
thrust::make_transform_iterator(thrust::make_counting_iterator((int) 0), _1 / N),
thrust::make_transform_iterator(thrust::make_counting_iterator((int) 0), _1 / N) + numModels * N,
thrust::make_transform_iterator(
thrust::make_zip_iterator(
thrust::make_tuple(
c_ptr,
cd_ptr,
thrust::make_permutation_iterator(
cmax_ptr,
thrust::make_transform_iterator(thrust::make_counting_iterator((int) 0), _1 / N)
)
)
),
cdTransform()
),
thrust::make_discard_iterator(),
gamma_ptr,
thrust::equal_to<int>(),
thrust::minimum<precision>()
);
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
set_gamma(gamma, gamma_tilde, lasso, nVars, maxVariables, M, numModels);
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
timer.start();
update(beta, beta_prev, mu, d, betaOls, gamma, dXA, y, a1, a2, lambda, lVars, nVars, step, infomapper, M, N, numModels, l1);
cudaDeviceSynchronize();
timer.stop();
times[t++] += timer.elapsed();
totalFlop += flopCounter(M, N, numModels, hNVars);
e++;
}
stepf.close();
nvarsf.close();
a1f.close();
a2f.close();
lambdaf.close();
betaf.close();
// Statistics
double transferTime = times[0];
double execTime = 0;
for (int i = 1; i < 25; i++) execTime += times[i];
printf("\n");
std::ofstream speedf("speed.csv");
for (int i = 0; i < 25; i++) {
speedf << i << ", " << times[i] << "\n";
}
speedf << (corr_flop * 1.0e-9) / (transferTime * 1.0e-3) << ", " << (totalFlop * 1.0e-9) / (execTime * 1.0e-3) << "\n";
speedf.close();
cudaFree(X);
cudaFree(Y);
cudaFree(cX);
cudaFree(cY);
cudaFree(nVars);
cudaFree(eVars);
cudaFree(step);
cudaFree(lasso);
cudaFree(done);
cudaFree(cidx);
cudaFree(act);
cudaFree(dropidx);
cudaFree(infomapper);
cudaFree(info);
cudaFree(lVars);
cudaFree(cmax);
cudaFree(a1);
cudaFree(a2);
cudaFree(lambda);
cudaFree(gamma);
cudaFree(y);
cudaFree(mu);
cudaFree(r);
cudaFree(betaOls);
cudaFree(d);
cudaFree(gamma_tilde);
cudaFree(beta);
cudaFree(c);
cudaFree(cd);
cudaFree(beta_prev);
cudaFree(rander1);
cudaFree(rander2);
cudaFree(randnrm);
cudaFree(corr_beta);
cudaFree(corr_sb);
cudaFree(corr_y);
cudaFree(corr_tmp);
cudaFree(corr_betaols);
cudaFree(corr_yh);
cudaFree(corr_z);
for (int i = 0; i < numModels; i++) {
cudaFree(XA[i]);
cudaFree(XA1[i]);
cudaFree(G[i]);
cudaFree(I[i]);
cudaFree(corr_XA[i]);
cudaFree(corr_G[i]);
cudaFree(corr_I[i]);
}
for (int i = 0; i < maxVariables; i++) {
cudaFree(dBatchXA[i]);
cudaFree(dBatchG[i]);
cudaFree(dBatchI[i]);
}
for (int i = 0; i < numStreams; i++) cudaStreamDestroy(streams[i]);
cudaFree(dXA);
cudaFree(dG);
cudaFree(dI);
cudaFree(corr_dXA);
cudaFree(corr_dG);
cudaFree(corr_dI);
cublasDestroy(hnd);
return 0;
}
|
6a6d066184b819331aad076c3b9af7a2e5711992.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*----------------------- BEGIN kppa_CUDA_rates.cu BEGIN ----------------------*/
/* @file kppa_CUDA_rates.cu */
/* @author charlesj */
/* @date 2015-07-06 14:41:44.647816 */
/* @brief Reaction rate calculation and utility functions */
/* */
/* Reaction rate calculation and utility functions */
/* */
/* This file was generated by Kppa: http://www.paratools.com/Kppa */
/*-----------------------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include "kppa_CUDA_cu_parameters.h"
#include "kppa_CUDA_rates.h"
/* BEGIN INLINE declared at /users/charlesj/KPP_BOXMODEL/cosmo-art-new/kppa-0.2.1_CUDA_files_with_deffix/radm2_kpp_eleni_0714_kppa.def:311,1 */
__device__
double TROE(double const K0300, double const Q, double const KU300, double const R, double const M, double const T)
{
double TT, K0, KU, K0M, KK, LGKK, E, F;
TT = T / 3.e2;
K0 = K0300 / pow(TT,Q);
KU = KU300 / pow(TT,R);
K0M = K0 * M;
KK = K0M / KU;
LGKK = 0.434294481 * log(KK);
E = 1.0 / (1.0 + LGKK*LGKK);
F = pow(0.6,E);
return F * K0M / (1. + KK);
}
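/* In formula form, TROE() above evaluates the Troe falloff expression */
/* k = F^E * K0*M / (1 + K0*M/KU), E = 1/(1 + log10(K0*M/KU)^2), F = 0.6, */
/* with K0 = K0300*(T/300)^-Q and KU = KU300*(T/300)^-R; the constant */
/* 0.434294481 is 1/ln(10), converting the natural log to log10. */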
/*------------------------------------ EQT ------------------------------------*/
/* @param[in] K0300 None */
/* @param[in] Q None */
/* @param[in] KU300 None */
/* @param[in] R None */
/* @param[in] M None */
/* @param[in] T None */
/* @param[in] A None */
/* @param[in] B None */
/*-----------------------------------------------------------------------------*/
__device__
double EQT(double const K0300, double const Q, double const KU300, double const R, double const M, double const T, double const A, double const B)
{
double KH;
KH = TROE( K0300, Q, KU300, R, M, T );
return (KH * A * exp( -B / T ));
}
/*----------------------------------- SPEZ ------------------------------------*/
/* @param[in] A0 None */
/* @param[in] B0 None */
/* @param[in] A2 None */
/* @param[in] B2 None */
/* @param[in] A3 None */
/* @param[in] B3 None */
/* @param[in] M None */
/* @param[in] T None */
/*-----------------------------------------------------------------------------*/
__device__
double SPEZ(double const A0, double const B0, double const A2, double const B2, double const A3, double const B3, double const M, double const T)
{
double K0, K2, K3;
K0 = A0*exp(B0/T);
K2 = A2*exp(B2/T);
K3 = A3*M*exp(B3/T);
return K0 + K3 / ( 1 + K3/K2 );
}
/* END INLINE declared at /users/charlesj/KPP_BOXMODEL/cosmo-art-new/kppa-0.2.1_CUDA_files_with_deffix/radm2_kpp_eleni_0714_kppa.def:311,1 */
/* Be friendly to Fortran mathematical intrinsics */
#define SQRT(X) sqrtf(X)
#define DSQRT(X) sqrt(X)
#define EXP(X) expf(X)
#define DEXP(X) exp(X)
#define LOG(X) log(X)
#define ALOG(X) logf(X)
#define DLOG(X) log(X)
#define LOG10(X) log10(X)
#define ALOG10(X) log10f(X)
#define DLOG10(X) log10(X)
#define SIN(X) sinf(X)
#define DSIN(X) sin(X)
#define COS(X) cosf(X)
#define DCOS(X) cos(X)
#define TAN(X) tanf(X)
#define DTAN(X) tan(X)
#define POW(X,Y) powf(X, Y)
#define DPOW(X,Y) pow(X, Y)
/*------------------------------------ ARR ------------------------------------*/
/* @param[in] a0 None */
/* @param[in] b0 None */
/* @param[in] c0 None */
/* @param[in] temp Temperature */
/*-----------------------------------------------------------------------------*/
__device__
double ARR(double const a0, double const b0, double const c0, double const
temp)
{
return __dmul_rn(__dmul_rn(a0, pow(__ddiv_rn(temp,(double)300.0), c0)),
exp(__ddiv_rn(-b0,temp)));
}/* END ARR */
/*------------------------------------ ARR2 -----------------------------------*/
/* @param[in] a0 None */
/* @param[in] b0 None */
/* @param[in] temp Temperature */
/*-----------------------------------------------------------------------------*/
__device__
double ARR2(double const a0, double const b0, double const temp)
{
return __dmul_rn(a0, exp(__ddiv_rn(b0,temp)));
}/* END ARR2 */
/*------------------------------------ EP2 ------------------------------------*/
/* @param[in] a0 None */
/* @param[in] c0 None */
/* @param[in] a2 None */
/* @param[in] c2 None */
/* @param[in] a3 None */
/* @param[in] c3 None */
/* @param[in] temp Temperature */
/*-----------------------------------------------------------------------------*/
__device__
double EP2(double const a0, double const c0, double const a2, double const
c2, double const a3, double const c3, double const temp)
{
double k0 = __dmul_rn(a0, exp(__ddiv_rn(-c0,temp)));
double k2 = __dmul_rn(a2, exp(__ddiv_rn(-c2,temp)));
double k3 = __dmul_rn(a3, exp(__ddiv_rn(-c3,temp))) * (double)1.0e6*CFACTOR;
return __dadd_rn(k0, __ddiv_rn(k3,
__dadd_rn((double)1.0, __ddiv_rn(k3,k2))));
}/* END EP2 */
/*------------------------------------ EP3 ------------------------------------*/
/* @param[in] a1 None */
/* @param[in] c1 None */
/* @param[in] a2 None */
/* @param[in] c2 None */
/* @param[in] temp Temperature */
/*-----------------------------------------------------------------------------*/
__device__
double EP3(double const a1, double const c1, double const a2, double const
c2, double const temp)
{
double k1 = __dmul_rn(a1, exp(__ddiv_rn(-c1,temp)));
double k2 = __dmul_rn(a2, exp(__ddiv_rn(-c2,temp)));
return __dadd_rn(k1, __dmul_rn(k2, (double)1.0e6*CFACTOR));
}/* END EP3 */
/*------------------------------------ FALL -----------------------------------*/
/* @param[in] a0 None */
/* @param[in] b0 None */
/* @param[in] c0 None */
/* @param[in] a1 None */
/* @param[in] b1 None */
/* @param[in] c1 None */
/* @param[in] cf None */
/* @param[in] temp Temperature */
/*-----------------------------------------------------------------------------*/
__device__
double FALL(double const a0, double const b0, double const c0, double const
a1, double const b1, double const c1, double const cf, double const temp)
{
/* Accuracy trumps precision in these calculations */
double k0 = a0 * pow(temp/(double)300.0, c0) * exp(-b0/temp) * (double)1.0e6*CFACTOR;
double k1 = k0 / (a1 * pow(temp/(double)300.0, c1) * exp(-b1/temp));
return (k0/((double)1.0+k1)) * pow(cf, (double)1.0/((double)1.0+pow(log10(k1),(double)2.0)));
}/* END FALL */
/*---------------------------------- Sunlight ---------------------------------*/
/* Calculates sunlight intensity in the range [0,1] as a function of time. */
/* Modify this routine to get the correct sunlight values for your model. */
/* */
/* @param[in] time Integration time */
/* @param[in] idx Current grid cell index */
/*-----------------------------------------------------------------------------*/
__device__
double Sunlight(double const time, size_t const idx)
{
int daysec = 24 * 3600; /* Seconds per day */
float sunrise = 5.5*3600; /* 5:30 local time */
float sunset = 19.5*3600; /* 7:30 local time */
float daily = time - ((int)time / daysec) * daysec;
float tmp;
/* Estimate sunlight intensity in the range [0,1] */
if ((daily >= sunrise) && (daily <= sunset)) {
tmp = __ddiv_rn(2.0*daily - sunrise-sunset, sunset-sunrise);
tmp = (tmp > 0) ? tmp * tmp : -tmp * tmp;
tmp = 0.5 * (1.0 + cospi(tmp));
} else {
tmp = 0.0;
}
return tmp;
}/* END Sunlight */
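/* The profile above is zero outside [sunrise, sunset] and rises smoothly to 1 */
/* at the midpoint of the daylight window (12:30 local time with these values). */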
/*------------------------------------ TROE -----------------------------------*/
/* Troe reactions (Stockwell et al., 1997) */
/* */
/* @param[in] k0_300K None */
/* @param[in] n None */
/* @param[in] kinf_300K None */
/* @param[in] m None */
/* @param[in] temp Temperature */
/* @param[in] cair None */
/*-----------------------------------------------------------------------------*/
__device__
double TROE(double const k0_300K, double const n, double const kinf_300K,
double const m, double const temp, double const cair)
{
double zt_help = 300.0/temp;
double k0_T = k0_300K * pow(zt_help, n) * cair;
double kinf_T = kinf_300K * pow(zt_help, m);
double k_ratio = k0_T/kinf_T;
return k0_T / (1.0 + k_ratio) * pow(0.6, 1.0/(1.0+pow(log10(k_ratio), 2)));
}/* END TROE */
/*----------------------------------- TROEE -----------------------------------*/
/* Troe equilibrium reactions (Stockwell et al., 1997) */
/* */
/* @param[in] a0 None */
/* @param[in] b0 None */
/* @param[in] k0_300K None */
/* @param[in] n None */
/* @param[in] kinf_300K None */
/* @param[in] m None */
/* @param[in] temp Temperature */
/* @param[in] cair None */
/*-----------------------------------------------------------------------------*/
__device__
double TROEE(double const a0, double const b0, double const k0_300K, double
const n, double const kinf_300K, double const m, double const temp,
double const cair)
{
double zt_help = 300.0/temp;
double k0_T = k0_300K * pow(zt_help,n) * cair;
double kinf_T = kinf_300K * pow(zt_help,m);
double k_ratio = k0_T/kinf_T;
double troe = k0_T / (1.0 + k_ratio) * pow(0.6, 1.0/(1.0+pow(log10(k_ratio), 2)));
return a0 * exp(-b0 / temp) * troe;
}/* END TROEE */
/*-------------------------------- Temperature --------------------------------*/
/* Calculates temperature (kelvin) as a function of time. */
/* Modify this routine to get the correct temperature values for your model. */
/* */
/* @param[in] time Integration time */
/* @param[in] idx Current grid cell index */
/*-----------------------------------------------------------------------------*/
__device__
double Temperature(double const time, size_t const idx)
{
float mintemp = 280; /* 280 Kelvin ~= 44 Fahrenheit */
float maxtemp = 300; /* 300 Kelvin ~= 80 Fahrenheit */
float tmp;
/* Estimate temperature cycling from mintemp to maxtemp */
tmp = sinpi(__ddiv_rn(time,24*3600));
if (tmp < 0) {
tmp = mintemp - tmp * (maxtemp-mintemp);
} else {
tmp = mintemp + tmp * (maxtemp-mintemp);
}
return tmp;
}/* END Temperature */
/*---------------------------------- d_Rates ----------------------------------*/
/* CUDA kernel for Rates */
/* */
/* @param[in] ncells32 A multiple of 32 grid cells */
/* @param[in] time Integration time */
/* @param[in] idx Current grid cell index */
/* @param[out] rct Reaction rates */
/*-----------------------------------------------------------------------------*/
__global__
void d_Rates(size_t const ncells32, double const time, size_t const idx,
double* rct)
{
/* Sunlight intensity: 0 to 1 inclusive (uppercase for KPP compatibility) */
double SUN;
/* Temperature in kelvin (uppercase for KPP compatibility) */
double TEMP;
SUN = Sunlight(time, idx);
TEMP = Temperature(time, idx);
size_t tidx = blockDim.x*blockIdx.x + threadIdx.x;
if(tidx < ncells32) {
rct += tidx;
rct[0] = PHOTO(1);
rct[ncells32] = PHOTO(2);
rct[2*ncells32] = PHOTO(3);
rct[3*ncells32] = PHOTO(4);
rct[4*ncells32] = PHOTO(5);
rct[5*ncells32] = PHOTO(6) +
EQT(1.8e-31,3.2e0,4.7e-12,1.4e0,M,TEMP,4.76e+26,10900.e0);
rct[6*ncells32] = PHOTO(7);
rct[7*ncells32] = PHOTO(8);
rct[8*ncells32] = PHOTO(9);
rct[9*ncells32] = PHOTO(10);
rct[10*ncells32] = PHOTO(11);
rct[11*ncells32] = PHOTO(12);
rct[12*ncells32] = PHOTO(13);
rct[13*ncells32] = PHOTO(14);
rct[14*ncells32] = PHOTO(15);
rct[15*ncells32] = PHOTO(16);
rct[16*ncells32] = PHOTO(17);
rct[17*ncells32] = PHOTO(18);
rct[18*ncells32] = PHOTO(19);
rct[19*ncells32] = PHOTO(20);
rct[20*ncells32] = PHOTO(21);
rct[21*ncells32] = M * 6.0E-34 * pow((TEMP/300.),(-2.3));
rct[22*ncells32] = 6.50E-12 * exp( 120. / TEMP );
rct[23*ncells32] = 2.00E-11 * exp( 130. / TEMP );
rct[24*ncells32] = 3.20E-11 * exp( 67. / TEMP );
rct[25*ncells32] = 2.14e-10;
rct[26*ncells32] = 1.4E-12 * exp( -1310. / TEMP );
rct[27*ncells32] = 1.70E-12 * exp( -940. / TEMP );
rct[28*ncells32] = 1.10E-14 * exp( -500. / TEMP );
rct[29*ncells32] = 3.45E-12 * exp( 270. / TEMP );
rct[30*ncells32] = TROE( 1.8E-31, 3.2e0, 4.7E-12, 1.4e0, M, TEMP );
rct[31*ncells32] = 2.2E-13 * exp(620./TEMP) + 1.9E-33 * M *
exp(980./TEMP);
rct[32*ncells32] = 3.08e-34*exp(2820./TEMP)+2.66e-54*M*exp(3180./TEMP);
rct[33*ncells32] = 3.30E-12 * exp( -200. / TEMP );
rct[34*ncells32] = TROE( 7.0e-31, 2.6e0, 1.5e-11, 0.5e0, M, TEMP );
rct[35*ncells32] = 3.30E-39 * exp( 530. / TEMP );
rct[36*ncells32] = 1.40E-13 * exp( -2470. / TEMP );
rct[37*ncells32] = 1.80E-11 * exp( 110. / TEMP );
rct[38*ncells32] = 2.50E-14 * exp( -1230. / TEMP );
rct[39*ncells32] = 2.5e-12;
rct[40*ncells32] = TROE( 2.2e-30, 4.3e0, 1.5e-12, 0.5e0, M, TEMP );
rct[41*ncells32] =
EQT(2.2e-30,4.3e0,1.5e-12,0.5e0,M,TEMP,9.09e+26,11200.e0);
rct[42*ncells32] = HET;
rct[43*ncells32] = TROE( 2.6e-30, 3.2e0, 2.4e-11, 1.3e0, M, TEMP );
rct[44*ncells32] =
SPEZ(7.2e-15,785.e0,4.1e-16,1440.e0,1.9e-33,725.e0,M,TEMP);
rct[45*ncells32] = 1.30E-12 * exp( 380. / TEMP );
rct[46*ncells32] = 4.80E-11 * exp( 250. / TEMP );
rct[47*ncells32] = TROE( 3.0e-31, 3.3e0, 1.5e-12, 0.0e0, M, TEMP );
rct[48*ncells32] = 1.5E-13 * ( 1.0 + 2.439E-20 * M );
rct[49*ncells32] = TEMP * TEMP * 6.95E-18 * exp( -1280. / TEMP );
rct[50*ncells32] = TEMP * TEMP * 1.37E-17 * exp( -444. /TEMP );
rct[51*ncells32] = 1.59E-11 * exp( -540. / TEMP );
rct[52*ncells32] = 1.73E-11 * exp( -380. / TEMP );
rct[53*ncells32] = 3.64E-11 * exp( -380. / TEMP );
rct[54*ncells32] = 2.15E-12 * exp( 411. / TEMP );
rct[55*ncells32] = 5.32E-12 * exp( 504. / TEMP );
rct[56*ncells32] = 1.07E-11 * exp( 549. / TEMP );
rct[57*ncells32] = 2.10E-12 * exp( 322. / TEMP );
rct[58*ncells32] = 1.89E-11 * exp( 116. / TEMP );
rct[59*ncells32] = 4e-11;
rct[60*ncells32] = 9e-12;
rct[61*ncells32] = 6.87E-12 * exp( 256. / TEMP );
rct[62*ncells32] = 1.20E-11 * exp( -745. / TEMP );
rct[63*ncells32] = 1.15e-11;
rct[64*ncells32] = 1.7e-11;
rct[65*ncells32] = 2.8e-11;
rct[66*ncells32] = 1e-11;
rct[67*ncells32] = 1e-11;
rct[68*ncells32] = 1e-11;
rct[69*ncells32] = TEMP * TEMP *6.85E-18 * exp( -444. / TEMP );
rct[70*ncells32] = 1.55E-11 * exp( -540. / TEMP );
rct[71*ncells32] = 2.55E-11 * exp( 409. / TEMP );
rct[72*ncells32] = 2.6E-12 * exp ( 380. / TEMP);
rct[73*ncells32] = 2.E16 * exp (-13500. / TEMP);
rct[74*ncells32] = 4.7e-12;
rct[75*ncells32] = 1.95E16 * exp(-13543. / TEMP );
rct[76*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[77*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[78*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[79*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[80*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[81*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[82*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[83*ncells32] = 3.50E-11 * exp( -180. / TEMP );
rct[84*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[85*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[86*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[87*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[88*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[89*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[90*ncells32] = 6.00E-13 * exp( -2058. / TEMP );
rct[91*ncells32] = 1.40E-12 * exp( -1900. / TEMP);
rct[92*ncells32] = 6.00E-13 * exp( -2058. / TEMP );
rct[93*ncells32] = 1.40E-12 * exp( -1900. / TEMP);
rct[94*ncells32] = 1.40E-12 * exp( -1900. / TEMP);
rct[95*ncells32] = 2.2e-11;
rct[96*ncells32] = 2.00E-12 * exp( -2923. / TEMP );
rct[97*ncells32] = 1.00E-11 * exp( -1895. / TEMP );
rct[98*ncells32] = 3.23E-11 * exp( -975. / TEMP );
rct[99*ncells32] = 5.81e-13;
rct[100*ncells32] = 1.20E-14 * exp( -2633. / TEMP );
rct[101*ncells32] = 1.32E-14 * exp( -2105. / TEMP );
rct[102*ncells32] = 7.29E-15 * exp( -1136. / TEMP );
rct[103*ncells32] = 1.23E-14 * exp( -2013. / TEMP );
rct[104*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[105*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[106*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[107*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[108*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[109*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[110*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[111*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[112*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[113*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[114*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[115*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[116*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[117*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[118*ncells32] = 1.90E-13 * exp( 220. / TEMP );
rct[119*ncells32] = 1.40E-13 * exp( 220. / TEMP );
rct[120*ncells32] = 4.20E-14 * exp( 220. / TEMP );
rct[121*ncells32] = 3.40E-14 * exp( 220. / TEMP );
rct[122*ncells32] = 2.90E-14 * exp( 220. / TEMP );
rct[123*ncells32] = 1.40E-13 * exp( 220. / TEMP );
rct[124*ncells32] = 1.40E-13 * exp( 220. / TEMP );
rct[125*ncells32] = 1.70E-14 * exp( 220. / TEMP );
rct[126*ncells32] = 1.70E-14 * exp( 220. / TEMP );
rct[127*ncells32] = 9.60E-13 * exp( 220. / TEMP );
rct[128*ncells32] = 1.70E-14 * exp( 220. / TEMP );
rct[129*ncells32] = 1.70E-14 * exp( 220. / TEMP );
rct[130*ncells32] = 9.60E-13 * exp( 220. / TEMP );
rct[131*ncells32] = 3.40E-13 * exp( 220. / TEMP );
rct[132*ncells32] = 1.00E-13 * exp( 220. / TEMP );
rct[133*ncells32] = 8.40E-14 * exp( 220. / TEMP );
rct[134*ncells32] = 7.20E-14 * exp( 220. / TEMP );
rct[135*ncells32] = 3.40E-13 * exp( 220. / TEMP );
rct[136*ncells32] = 3.40E-13 * exp( 220. / TEMP );
rct[137*ncells32] = 4.20E-14 * exp( 220. / TEMP );
rct[138*ncells32] = 4.20E-14 * exp( 220. / TEMP );
rct[139*ncells32] = 1.19E-12 * exp( 220. / TEMP );
rct[140*ncells32] = 4.20E-14 * exp( 220. / TEMP );
rct[141*ncells32] = 4.20E-14 * exp( 220. / TEMP );
rct[142*ncells32] = 1.19E-12 * exp( 220. / TEMP );
rct[143*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[144*ncells32] = 1.70E-14 * exp( 220. / TEMP );
rct[145*ncells32] = 4.20E-14 * exp( 220. / TEMP );
rct[146*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[147*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[148*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[149*ncells32] = 1.70E-14 * exp( 220. / TEMP );
rct[150*ncells32] = 4.20E-14 * exp( 220. / TEMP );
rct[151*ncells32] = 1.70E-14 * exp( 220. / TEMP );
rct[152*ncells32] = 4.20E-14 * exp( 220. / TEMP );
rct[153*ncells32] = 3.60E-16 * exp( 220. / TEMP );
rct[154*ncells32] = 1.21E-11 * exp( 444. / TEMP );
rct[155*ncells32] = ARR2(1.19E-12,490.,TEMP);
rct[156*ncells32] = ARR2(1.01E-15,-736.,TEMP);
rct[157*ncells32] = 4e-12;
rct[158*ncells32] = 1.5e-11;
rct[159*ncells32] = ARR2(3.56E-14,708.,TEMP);
rct[160*ncells32] = ARR2(7.40E-13,765.,TEMP);
rct[161*ncells32] = 1.2e-12;
rct[162*ncells32] = 1.7e-10;
rct[163*ncells32] = 1.22e-11;
rct[164*ncells32] = 2e-16;
rct[165*ncells32] = 4e-12;
rct[166*ncells32] = 1.5e-11;
rct[167*ncells32] = ARR2(3.56E-14,708.,TEMP);
rct[168*ncells32] = ARR2(7.40E-13,765.,TEMP);
rct[169*ncells32] = 1.2e-12;
rct[170*ncells32] = ARR2(2.43E-12,360.,TEMP);
rct[171*ncells32] = ARR2(2.05E-13,1300.,TEMP);
rct[172*ncells32] = 2e-12;
rct[173*ncells32] = 1e-10;
rct[174*ncells32] = 1.3e-11;
rct[175*ncells32] = 0.5*(4.13E-12 * exp( 452. / TEMP ) + 1.86E-11 * exp(
175. / TEMP ));
rct[176*ncells32] = 0.5*(1.36E-15 * exp( -2112. / TEMP ) + 7.51E-16 *
exp( -1521. / TEMP ));
rct[177*ncells32] = ARR2(2.54E-12,360.,TEMP);
rct[178*ncells32] = ARR2(1.82E-13,1300.,TEMP);
rct[179*ncells32] = 2e-12;
rct[180*ncells32] = TROE( 9.7e-29, -5.6e0, 9.3e-12, -1.5e0, M, TEMP );
rct[181*ncells32] = TROE( 9.7e-29, -5.6e0, 9.3e-12, -1.5e0, M, TEMP
)/(ARR2(9.0E-19,14000.,TEMP));
rct[182*ncells32] = 3.6e-12;
rct[183*ncells32] = 3e-11;
rct[184*ncells32] = 3e-12;
rct[185*ncells32] = ARR2(5.60E-12,270.,TEMP);
rct[186*ncells32] = ARR2(1.9E-13,500.,TEMP);
rct[187*ncells32] = ARR2(9.6E-12,-234.,TEMP);
rct[188*ncells32] =
ARR2(3.04E-12,350.,TEMP)*ARR2(1.106E-31,7460.,TEMP)*M/(1+ARR2(1.106E-31,7460.,TEMP)*M);
rct[189*ncells32] = 5.8e-11;
rct[190*ncells32] = 2.5e-12;
rct[191*ncells32] = 2.5e-12;
rct[192*ncells32] = 2.5e-12;
rct[193*ncells32] = 0.0;
}
}/* END d_Rates */
/*----------------------------------- Rates -----------------------------------*/
/* Calculates reaction rate coefficients */
/* */
/* @param[in] ncells32 A multiple of 32 grid cells */
/* @param[in] time Integration time */
/* @param[in] idx Current grid cell index */
/* @param[out] d_rct Reaction rates in device memory */
/*-----------------------------------------------------------------------------*/
void Rates(size_t const ncells32, double const time, size_t const idx,
double* d_rct)
{
size_t nBlocks = ((ncells32 + 127) & ~127) >> 7;
size_t nThreads = 128;
hipLaunchKernelGGL(( d_Rates), dim3(nBlocks), dim3(nThreads), 0, 0, ncells32, time, idx, d_rct);
}/* END Rates */
/*------------------------- END kppa_CUDA_rates.h END -------------------------*/
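/* Usage sketch -- added for illustration, not part of the generated file. */
/* The rct indexing in d_Rates covers entries 0..193, so 194 coefficients per */
/* grid cell are assumed here; ncells32 must already be a multiple of 32. */
void exampleRates(size_t const ncells32, double const time)
{
    double* d_rct = NULL;
    hipMalloc((void**)&d_rct, (size_t)194 * ncells32 * sizeof(double));
    Rates(ncells32, time, 0, d_rct); /* fills d_rct on the device */
    hipDeviceSynchronize();          /* wait for d_Rates to finish */
    hipFree(d_rct);
}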
| 6a6d066184b819331aad076c3b9af7a2e5711992.cu | /*----------------------- BEGIN kppa_CUDA_rates.cu BEGIN ----------------------*/
/* @file kppa_CUDA_rates.cu */
/* @author charlesj */
/* @date 2015-07-06 14:41:44.647816 */
/* @brief Reaction rate calculation and utility functions */
/* */
/* Reaction rate calculation and utility functions */
/* */
/* This file was generated by Kppa: http://www.paratools.com/Kppa */
/*-----------------------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include "kppa_CUDA_cu_parameters.h"
#include "kppa_CUDA_rates.h"
/* BEGIN INLINE declared at /users/charlesj/KPP_BOXMODEL/cosmo-art-new/kppa-0.2.1_CUDA_files_with_deffix/radm2_kpp_eleni_0714_kppa.def:311,1 */
__device__
double TROE(double const K0300, double const Q, double const KU300, double const R, double const M, double const T)
{
double TT, K0, KU, K0M, KK, LGKK, E, F;
TT = T / 3.e2;
K0 = K0300 / pow(TT,Q);
KU = KU300 / pow(TT,R);
K0M = K0 * M;
KK = K0M / KU;
LGKK = 0.434294481 * log(KK);
E = 1.0 / (1.0 + LGKK*LGKK);
F = pow(0.6,E);
return F * K0M / (1. + KK);
}
/*------------------------------------ EQT ------------------------------------*/
/* @param[in] K0300 None */
/* @param[in] Q None */
/* @param[in] KU300 None */
/* @param[in] R None */
/* @param[in] M None */
/* @param[in] T None */
/* @param[in] A None */
/* @param[in] B None */
/*-----------------------------------------------------------------------------*/
__device__
double EQT(double const K0300, double const Q, double const KU300, double const R, double const M, double const T, double const A, double const B)
{
double KH;
KH = TROE( K0300, Q, KU300, R, M, T );
return (KH * A * exp( -B / T ));
}
/*----------------------------------- SPEZ ------------------------------------*/
/* @param[in] A0 None */
/* @param[in] B0 None */
/* @param[in] A2 None */
/* @param[in] B2 None */
/* @param[in] A3 None */
/* @param[in] B3 None */
/* @param[in] M None */
/* @param[in] T None */
/*-----------------------------------------------------------------------------*/
__device__
double SPEZ(double const A0, double const B0, double const A2, double const B2, double const A3, double const B3, double const M, double const T)
{
double K0, K2, K3;
K0 = A0*exp(B0/T);
K2 = A2*exp(B2/T);
K3 = A3*M*exp(B3/T);
return K0 + K3 / ( 1 + K3/K2 );
}
/* END INLINE declared at /users/charlesj/KPP_BOXMODEL/cosmo-art-new/kppa-0.2.1_CUDA_files_with_deffix/radm2_kpp_eleni_0714_kppa.def:311,1 */
/* Be friendly to Fortran mathematical intrinsics */
#define SQRT(X) sqrtf(X)
#define DSQRT(X) sqrt(X)
#define EXP(X) expf(X)
#define DEXP(X) exp(X)
#define LOG(X) log(X)
#define ALOG(X) logf(X)
#define DLOG(X) log(X)
#define LOG10(X) log10(X)
#define ALOG10(X) log10f(X)
#define DLOG10(X) log10(X)
#define SIN(X) sinf(X)
#define DSIN(X) sin(X)
#define COS(X) cosf(X)
#define DCOS(X) cos(X)
#define TAN(X) tanf(X)
#define DTAN(X) tan(X)
#define POW(X,Y) powf(X, Y)
#define DPOW(X,Y) pow(X, Y)
/*------------------------------------ ARR ------------------------------------*/
/* @param[in] a0 None */
/* @param[in] b0 None */
/* @param[in] c0 None */
/* @param[in] temp Temperature */
/*-----------------------------------------------------------------------------*/
__device__
double ARR(double const a0, double const b0, double const c0, double const
temp)
{
return __dmul_rn(__dmul_rn(a0, pow(__ddiv_rn(temp,(double)300.0), c0)),
exp(__ddiv_rn(-b0,temp)));
}/* END ARR */
/*------------------------------------ ARR2 -----------------------------------*/
/* @param[in] a0 None */
/* @param[in] b0 None */
/* @param[in] temp Temperature */
/*-----------------------------------------------------------------------------*/
__device__
double ARR2(double const a0, double const b0, double const temp)
{
return __dmul_rn(a0, exp(__ddiv_rn(b0,temp)));
}/* END ARR2 */
/*------------------------------------ EP2 ------------------------------------*/
/* @param[in] a0 None */
/* @param[in] c0 None */
/* @param[in] a2 None */
/* @param[in] c2 None */
/* @param[in] a3 None */
/* @param[in] c3 None */
/* @param[in] temp Temperature */
/*-----------------------------------------------------------------------------*/
__device__
double EP2(double const a0, double const c0, double const a2, double const
c2, double const a3, double const c3, double const temp)
{
double k0 = __dmul_rn(a0, exp(__ddiv_rn(-c0,temp)));
double k2 = __dmul_rn(a2, exp(__ddiv_rn(-c2,temp)));
double k3 = __dmul_rn(a3, exp(__ddiv_rn(-c3,temp))) * (double)1.0e6*CFACTOR;
return __dadd_rn(k0, __ddiv_rn(k3,
__dadd_rn((double)1.0, __ddiv_rn(k3,k2))));
}/* END EP2 */
/*------------------------------------ EP3 ------------------------------------*/
/* @param[in] a1 None */
/* @param[in] c1 None */
/* @param[in] a2 None */
/* @param[in] c2 None */
/* @param[in] temp Temperature */
/*-----------------------------------------------------------------------------*/
__device__
double EP3(double const a1, double const c1, double const a2, double const
c2, double const temp)
{
double k1 = __dmul_rn(a1, exp(__ddiv_rn(-c1,temp)));
double k2 = __dmul_rn(a2, exp(__ddiv_rn(-c2,temp)));
return __dadd_rn(k1, __dmul_rn(k2, (double)1.0e6*CFACTOR));
}/* END EP3 */
/*------------------------------------ FALL -----------------------------------*/
/* @param[in] a0 None */
/* @param[in] b0 None */
/* @param[in] c0 None */
/* @param[in] a1 None */
/* @param[in] b1 None */
/* @param[in] c1 None */
/* @param[in] cf None */
/* @param[in] temp Temperature */
/*-----------------------------------------------------------------------------*/
__device__
double FALL(double const a0, double const b0, double const c0, double const
a1, double const b1, double const c1, double const cf, double const temp)
{
/* Accuracy trumps precision in these calculations */
double k0 = a0 * pow(temp/(double)300.0, c0) * exp(-b0/temp) * (double)1.0e6*CFACTOR;
double k1 = k0 / (a1 * pow(temp/(double)300.0, c1) * exp(-b1/temp));
return (k0/((double)1.0+k1)) * pow(cf, (double)1.0/((double)1.0+pow(log10(k1),(double)2.0)));
}/* END FALL */
/*---------------------------------- Sunlight ---------------------------------*/
/* Calculates sunlight intensity in the range [0,1] as a function of time. */
/* Modify this routine to get the correct sunlight values for your model. */
/* */
/* @param[in] time Integration time */
/* @param[in] idx Current grid cell index */
/*-----------------------------------------------------------------------------*/
__device__
double Sunlight(double const time, size_t const idx)
{
int daysec = 24 * 3600; /* Seconds per day */
float sunrise = 5.5*3600; /* 5:30 local time */
float sunset = 19.5*3600; /* 7:30 local time */
float daily = time - ((int)time / daysec) * daysec;
float tmp;
/* Estimate sunlight intensity in the range [0,1] */
if ((daily >= sunrise) && (daily <= sunset)) {
tmp = __ddiv_rn(2.0*daily - sunrise-sunset, sunset-sunrise);
tmp = (tmp > 0) ? tmp * tmp : -tmp * tmp;
tmp = 0.5 * (1.0 + cospi(tmp));
} else {
tmp = 0.0;
}
return tmp;
}/* END Sunlight */
/*------------------------------------ TROE -----------------------------------*/
/* Troe reactions (Stockwell et al., 1997) */
/* */
/* @param[in] k0_300K None */
/* @param[in] n None */
/* @param[in] kinf_300K None */
/* @param[in] m None */
/* @param[in] temp Temperature */
/* @param[in] cair None */
/*-----------------------------------------------------------------------------*/
__device__
double TROE(double const k0_300K, double const n, double const kinf_300K,
double const m, double const temp, double const cair)
{
double zt_help = 300.0/temp;
double k0_T = k0_300K * pow(zt_help, n) * cair;
double kinf_T = kinf_300K * pow(zt_help, m);
double k_ratio = k0_T/kinf_T;
return k0_T / (1.0 + k_ratio) * pow(0.6, 1.0/(1.0+pow(log10(k_ratio), 2)));
}/* END TROE */
/*----------------------------------- TROEE -----------------------------------*/
/* Troe equilibrium reactions (Stockwell et al., 1997) */
/* */
/* @param[in] a0 None */
/* @param[in] b0 None */
/* @param[in] k0_300K None */
/* @param[in] n None */
/* @param[in] kinf_300K None */
/* @param[in] m None */
/* @param[in] temp Temperature */
/* @param[in] cair None */
/*-----------------------------------------------------------------------------*/
__device__
double TROEE(double const a0, double const b0, double const k0_300K, double
const n, double const kinf_300K, double const m, double const temp,
double const cair)
{
double zt_help = 300.0/temp;
double k0_T = k0_300K * pow(zt_help,n) * cair;
double kinf_T = kinf_300K * pow(zt_help,m);
double k_ratio = k0_T/kinf_T;
double troe = k0_T / (1.0 + k_ratio) * pow(0.6, 1.0/(1.0+pow(log10(k_ratio), 2)));
return a0 * exp(-b0 / temp) * troe;
}/* END TROEE */
/*-------------------------------- Temperature --------------------------------*/
/* Calculates temperature (kelvin) as a function of time. */
/* Modify this routine to get the correct temperature values for your model. */
/* */
/* @param[in] time Integration time */
/* @param[in] idx Current grid cell index */
/*-----------------------------------------------------------------------------*/
__device__
double Temperature(double const time, size_t const idx)
{
float mintemp = 280; /* 280 Kelvin ~= 44 Fahrenheit */
float maxtemp = 300; /* 300 Kelvin ~= 80 Fahrenheit */
float tmp;
/* Estimate temperature cycling from mintemp to maxtemp */
tmp = sinpi(__ddiv_rn(time,24*3600));
if (tmp < 0) {
tmp = mintemp - tmp * (maxtemp-mintemp);
} else {
tmp = mintemp + tmp * (maxtemp-mintemp);
}
return tmp;
}/* END Temperature */
/*---------------------------------- d_Rates ----------------------------------*/
/* CUDA kernel for Rates */
/* */
/* @param[in] ncells32 A multiple of 32 grid cells */
/* @param[in] time Integration time */
/* @param[in] idx Current grid cell index */
/* @param[out] rct Reaction rates */
/*-----------------------------------------------------------------------------*/
__global__
void d_Rates(size_t const ncells32, double const time, size_t const idx,
double* rct)
{
/* Sunlight intensity: 0 to 1 inclusive (uppercase for KPP compatibility) */
double SUN;
/* Temperature in kelvin (uppercase for KPP compatibility) */
double TEMP;
SUN = Sunlight(time, idx);
TEMP = Temperature(time, idx);
size_t tidx = blockDim.x*blockIdx.x + threadIdx.x;
if(tidx < ncells32) {
rct += tidx;
rct[0] = PHOTO(1);
rct[ncells32] = PHOTO(2);
rct[2*ncells32] = PHOTO(3);
rct[3*ncells32] = PHOTO(4);
rct[4*ncells32] = PHOTO(5);
rct[5*ncells32] = PHOTO(6) +
EQT(1.8e-31,3.2e0,4.7e-12,1.4e0,M,TEMP,4.76e+26,10900.e0);
rct[6*ncells32] = PHOTO(7);
rct[7*ncells32] = PHOTO(8);
rct[8*ncells32] = PHOTO(9);
rct[9*ncells32] = PHOTO(10);
rct[10*ncells32] = PHOTO(11);
rct[11*ncells32] = PHOTO(12);
rct[12*ncells32] = PHOTO(13);
rct[13*ncells32] = PHOTO(14);
rct[14*ncells32] = PHOTO(15);
rct[15*ncells32] = PHOTO(16);
rct[16*ncells32] = PHOTO(17);
rct[17*ncells32] = PHOTO(18);
rct[18*ncells32] = PHOTO(19);
rct[19*ncells32] = PHOTO(20);
rct[20*ncells32] = PHOTO(21);
rct[21*ncells32] = M * 6.0E-34 * pow((TEMP/300.),(-2.3));
rct[22*ncells32] = 6.50E-12 * exp( 120. / TEMP );
rct[23*ncells32] = 2.00E-11 * exp( 130. / TEMP );
rct[24*ncells32] = 3.20E-11 * exp( 67. / TEMP );
rct[25*ncells32] = 2.14e-10;
rct[26*ncells32] = 1.4E-12 * exp( -1310. / TEMP );
rct[27*ncells32] = 1.70E-12 * exp( -940. / TEMP );
rct[28*ncells32] = 1.10E-14 * exp( -500. / TEMP );
rct[29*ncells32] = 3.45E-12 * exp( 270. / TEMP );
rct[30*ncells32] = TROE( 1.8E-31, 3.2e0, 4.7E-12, 1.4e0, M, TEMP );
rct[31*ncells32] = 2.2E-13 * exp(620./TEMP) + 1.9E-33 * M *
exp(980./TEMP);
rct[32*ncells32] = 3.08e-34*exp(2820./TEMP)+2.66e-54*M*exp(3180./TEMP);
rct[33*ncells32] = 3.30E-12 * exp( -200. / TEMP );
rct[34*ncells32] = TROE( 7.0e-31, 2.6e0, 1.5e-11, 0.5e0, M, TEMP );
rct[35*ncells32] = 3.30E-39 * exp( 530. / TEMP );
rct[36*ncells32] = 1.40E-13 * exp( -2470. / TEMP );
rct[37*ncells32] = 1.80E-11 * exp( 110. / TEMP );
rct[38*ncells32] = 2.50E-14 * exp( -1230. / TEMP );
rct[39*ncells32] = 2.5e-12;
rct[40*ncells32] = TROE( 2.2e-30, 4.3e0, 1.5e-12, 0.5e0, M, TEMP );
rct[41*ncells32] =
EQT(2.2e-30,4.3e0,1.5e-12,0.5e0,M,TEMP,9.09e+26,11200.e0);
rct[42*ncells32] = HET;
rct[43*ncells32] = TROE( 2.6e-30, 3.2e0, 2.4e-11, 1.3e0, M, TEMP );
rct[44*ncells32] =
SPEZ(7.2e-15,785.e0,4.1e-16,1440.e0,1.9e-33,725.e0,M,TEMP);
rct[45*ncells32] = 1.30E-12 * exp( 380. / TEMP );
rct[46*ncells32] = 4.80E-11 * exp( 250. / TEMP );
rct[47*ncells32] = TROE( 3.0e-31, 3.3e0, 1.5e-12, 0.0e0, M, TEMP );
rct[48*ncells32] = 1.5E-13 * ( 1.0 + 2.439E-20 * M );
rct[49*ncells32] = TEMP * TEMP * 6.95E-18 * exp( -1280. / TEMP );
rct[50*ncells32] = TEMP * TEMP * 1.37E-17 * exp( -444. /TEMP );
rct[51*ncells32] = 1.59E-11 * exp( -540. / TEMP );
rct[52*ncells32] = 1.73E-11 * exp( -380. / TEMP );
rct[53*ncells32] = 3.64E-11 * exp( -380. / TEMP );
rct[54*ncells32] = 2.15E-12 * exp( 411. / TEMP );
rct[55*ncells32] = 5.32E-12 * exp( 504. / TEMP );
rct[56*ncells32] = 1.07E-11 * exp( 549. / TEMP );
rct[57*ncells32] = 2.10E-12 * exp( 322. / TEMP );
rct[58*ncells32] = 1.89E-11 * exp( 116. / TEMP );
rct[59*ncells32] = 4e-11;
rct[60*ncells32] = 9e-12;
rct[61*ncells32] = 6.87E-12 * exp( 256. / TEMP );
rct[62*ncells32] = 1.20E-11 * exp( -745. / TEMP );
rct[63*ncells32] = 1.15e-11;
rct[64*ncells32] = 1.7e-11;
rct[65*ncells32] = 2.8e-11;
rct[66*ncells32] = 1e-11;
rct[67*ncells32] = 1e-11;
rct[68*ncells32] = 1e-11;
rct[69*ncells32] = TEMP * TEMP *6.85E-18 * exp( -444. / TEMP );
rct[70*ncells32] = 1.55E-11 * exp( -540. / TEMP );
rct[71*ncells32] = 2.55E-11 * exp( 409. / TEMP );
rct[72*ncells32] = 2.6E-12 * exp ( 380. / TEMP);
rct[73*ncells32] = 2.E16 * exp (-13500. / TEMP);
rct[74*ncells32] = 4.7e-12;
rct[75*ncells32] = 1.95E16 * exp(-13543. / TEMP );
rct[76*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[77*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[78*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[79*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[80*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[81*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[82*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[83*ncells32] = 3.50E-11 * exp( -180. / TEMP );
rct[84*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[85*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[86*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[87*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[88*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[89*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[90*ncells32] = 6.00E-13 * exp( -2058. / TEMP );
rct[91*ncells32] = 1.40E-12 * exp( -1900. / TEMP);
rct[92*ncells32] = 6.00E-13 * exp( -2058. / TEMP );
rct[93*ncells32] = 1.40E-12 * exp( -1900. / TEMP);
rct[94*ncells32] = 1.40E-12 * exp( -1900. / TEMP);
rct[95*ncells32] = 2.2e-11;
rct[96*ncells32] = 2.00E-12 * exp( -2923. / TEMP );
rct[97*ncells32] = 1.00E-11 * exp( -1895. / TEMP );
rct[98*ncells32] = 3.23E-11 * exp( -975. / TEMP );
rct[99*ncells32] = 5.81e-13;
rct[100*ncells32] = 1.20E-14 * exp( -2633. / TEMP );
rct[101*ncells32] = 1.32E-14 * exp( -2105. / TEMP );
rct[102*ncells32] = 7.29E-15 * exp( -1136. / TEMP );
rct[103*ncells32] = 1.23E-14 * exp( -2013. / TEMP );
rct[104*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[105*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[106*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[107*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[108*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[109*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[110*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[111*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[112*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[113*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[114*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[115*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[116*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[117*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[118*ncells32] = 1.90E-13 * exp( 220. / TEMP );
rct[119*ncells32] = 1.40E-13 * exp( 220. / TEMP );
rct[120*ncells32] = 4.20E-14 * exp( 220. / TEMP );
rct[121*ncells32] = 3.40E-14 * exp( 220. / TEMP );
rct[122*ncells32] = 2.90E-14 * exp( 220. / TEMP );
rct[123*ncells32] = 1.40E-13 * exp( 220. / TEMP );
rct[124*ncells32] = 1.40E-13 * exp( 220. / TEMP );
rct[125*ncells32] = 1.70E-14 * exp( 220. / TEMP );
rct[126*ncells32] = 1.70E-14 * exp( 220. / TEMP );
rct[127*ncells32] = 9.60E-13 * exp( 220. / TEMP );
rct[128*ncells32] = 1.70E-14 * exp( 220. / TEMP );
rct[129*ncells32] = 1.70E-14 * exp( 220. / TEMP );
rct[130*ncells32] = 9.60E-13 * exp( 220. / TEMP );
rct[131*ncells32] = 3.40E-13 * exp( 220. / TEMP );
rct[132*ncells32] = 1.00E-13 * exp( 220. / TEMP );
rct[133*ncells32] = 8.40E-14 * exp( 220. / TEMP );
rct[134*ncells32] = 7.20E-14 * exp( 220. / TEMP );
rct[135*ncells32] = 3.40E-13 * exp( 220. / TEMP );
rct[136*ncells32] = 3.40E-13 * exp( 220. / TEMP );
rct[137*ncells32] = 4.20E-14 * exp( 220. / TEMP );
rct[138*ncells32] = 4.20E-14 * exp( 220. / TEMP );
rct[139*ncells32] = 1.19E-12 * exp( 220. / TEMP );
rct[140*ncells32] = 4.20E-14 * exp( 220. / TEMP );
rct[141*ncells32] = 4.20E-14 * exp( 220. / TEMP );
rct[142*ncells32] = 1.19E-12 * exp( 220. / TEMP );
rct[143*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[144*ncells32] = 1.70E-14 * exp( 220. / TEMP );
rct[145*ncells32] = 4.20E-14 * exp( 220. / TEMP );
rct[146*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[147*ncells32] = 4.20E-12 * exp( 180. / TEMP );
rct[148*ncells32] = 7.70E-14 * exp( 1300. / TEMP );
rct[149*ncells32] = 1.70E-14 * exp( 220. / TEMP );
rct[150*ncells32] = 4.20E-14 * exp( 220. / TEMP );
rct[151*ncells32] = 1.70E-14 * exp( 220. / TEMP );
rct[152*ncells32] = 4.20E-14 * exp( 220. / TEMP );
rct[153*ncells32] = 3.60E-16 * exp( 220. / TEMP );
rct[154*ncells32] = 1.21E-11 * exp( 444. / TEMP );
rct[155*ncells32] = ARR2(1.19E-12,490.,TEMP);
rct[156*ncells32] = ARR2(1.01E-15,-736.,TEMP);
rct[157*ncells32] = 4e-12;
rct[158*ncells32] = 1.5e-11;
rct[159*ncells32] = ARR2(3.56E-14,708.,TEMP);
rct[160*ncells32] = ARR2(7.40E-13,765.,TEMP);
rct[161*ncells32] = 1.2e-12;
rct[162*ncells32] = 1.7e-10;
rct[163*ncells32] = 1.22e-11;
rct[164*ncells32] = 2e-16;
rct[165*ncells32] = 4e-12;
rct[166*ncells32] = 1.5e-11;
rct[167*ncells32] = ARR2(3.56E-14,708.,TEMP);
rct[168*ncells32] = ARR2(7.40E-13,765.,TEMP);
rct[169*ncells32] = 1.2e-12;
rct[170*ncells32] = ARR2(2.43E-12,360.,TEMP);
rct[171*ncells32] = ARR2(2.05E-13,1300.,TEMP);
rct[172*ncells32] = 2e-12;
rct[173*ncells32] = 1e-10;
rct[174*ncells32] = 1.3e-11;
rct[175*ncells32] = 0.5*(4.13E-12 * exp( 452. / TEMP ) + 1.86E-11 * exp(
175. / TEMP ));
rct[176*ncells32] = 0.5*(1.36E-15 * exp( -2112. / TEMP ) + 7.51E-16 *
exp( -1521. / TEMP ));
rct[177*ncells32] = ARR2(2.54E-12,360.,TEMP);
rct[178*ncells32] = ARR2(1.82E-13,1300.,TEMP);
rct[179*ncells32] = 2e-12;
rct[180*ncells32] = TROE( 9.7e-29, -5.6e0, 9.3e-12, -1.5e0, M, TEMP );
rct[181*ncells32] = TROE( 9.7e-29, -5.6e0, 9.3e-12, -1.5e0, M, TEMP )/(ARR2(9.0E-19,14000.,TEMP));
rct[182*ncells32] = 3.6e-12;
rct[183*ncells32] = 3e-11;
rct[184*ncells32] = 3e-12;
rct[185*ncells32] = ARR2(5.60E-12,270.,TEMP);
rct[186*ncells32] = ARR2(1.9E-13,500.,TEMP);
rct[187*ncells32] = ARR2(9.6E-12,-234.,TEMP);
rct[188*ncells32] = ARR2(3.04E-12,350.,TEMP)*ARR2(1.106E-31,7460.,TEMP)*M/(1+ARR2(1.106E-31,7460.,TEMP)*M);
rct[189*ncells32] = 5.8e-11;
rct[190*ncells32] = 2.5e-12;
rct[191*ncells32] = 2.5e-12;
rct[192*ncells32] = 2.5e-12;
rct[193*ncells32] = 0.0;
}
}/* END d_Rates */
/*----------------------------------- Rates -----------------------------------*/
/* Calculates reaction rate coefficients */
/* */
/* @param[in] ncells32 A multiple of 32 grid cells */
/* @param[in] time Integration time */
/* @param[in] idx Current grid cell index */
/* @param[out] d_rct Reaction rates in device memory */
/*-----------------------------------------------------------------------------*/
void Rates(size_t const ncells32, double const time, size_t const idx,
double* d_rct)
{
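/* Round ncells32 up to the next multiple of 128, then launch one
   128-thread block per 128 grid cells. */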
size_t nBlocks = ((ncells32 + 127) & ~127) >> 7;
size_t nThreads = 128;
d_Rates<<<nBlocks, nThreads>>>(ncells32, time, idx, d_rct);
}/* END Rates */
/*------------------------- END kppa_CUDA_rates.h END -------------------------*/
|
36b5f61672d01f63c2d63c475a45446e95d8f0de.hip | // !!! This is a file automatically generated by hipify!!!
//a reduction on a single MP with a little bit 'o ugly
//at the end to advance the wavefront
//WJB 03/11
#include <hip/hip_runtime.h>
#include <stdio.h>
//__constant__ int sizeTheta;
//__constant__ int sizePhi;
//
__global__ void advanceWave(float *devReductionKeysIn,
float *devReductionValuesIn,
float *devReductionKeysOut,
float *devReductionValuesOut,
float *devRadius,
float *devTTable,
int sizeTheta,
int sizePhi){
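//Block-level min-reduction: each block loads its slice of candidate
//key/value pairs into shared memory, keeps the smallest value (keyBlock
//tracks the global index it came from), and thread 0 writes the block's
//winner to devReduction*Out[blockIdx.x]. The launch appears to assume
//four blocks: once all four outputs are non-zero, the overall minimum is
//used to advance devRadius at that (theta, phi) cell and to record the
//value in devTTable, after which the outputs are reset to zero.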
const int thread_x = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ float valBlock[LOC_RED_THREADS];
__shared__ float keyBlock[LOC_RED_THREADS];
//__shared__ int flag;
valBlock[threadIdx.x] = devReductionValuesIn[thread_x];
keyBlock[threadIdx.x] = devReductionKeysIn[thread_x];
#if LOC_RED_THREADS > 511
if (threadIdx.x<256){
if (valBlock[threadIdx.x] > valBlock[threadIdx.x+256]){
valBlock[threadIdx.x] = valBlock[threadIdx.x+256];
keyBlock[threadIdx.x] = thread_x+256; } else{
keyBlock[threadIdx.x] = thread_x;
}
}
__syncthreads();
#endif
#if LOC_RED_THREADS > 255
if (threadIdx.x<128){
if (valBlock[threadIdx.x] > valBlock[threadIdx.x+128]){
valBlock[threadIdx.x] = valBlock[threadIdx.x+128];
keyBlock[threadIdx.x] = thread_x+128; } else{
keyBlock[threadIdx.x] = thread_x;
}
}
__syncthreads();
#endif
#if LOC_RED_THREADS > 127
if (threadIdx.x<64){
if (valBlock[threadIdx.x] > valBlock[threadIdx.x+64]){
valBlock[threadIdx.x] = valBlock[threadIdx.x+64];
keyBlock[threadIdx.x] = thread_x+64;
} else{
keyBlock[threadIdx.x] = thread_x;
}
}
__syncthreads();
#endif
#if LOC_RED_THREADS > 63
if (threadIdx.x<32){
if (valBlock[threadIdx.x] > valBlock[threadIdx.x+32]){
valBlock[threadIdx.x] = valBlock[threadIdx.x+32];
keyBlock[threadIdx.x] = thread_x+32; } else{
keyBlock[threadIdx.x] = thread_x;
}
}
#elif LOC_RED_THREADS==32
if (threadIdx.x<16){
#endif
if (valBlock[threadIdx.x] > valBlock[threadIdx.x+16]){
valBlock[threadIdx.x] = valBlock[threadIdx.x+16];
keyBlock[threadIdx.x] = thread_x+16; } else{
keyBlock[threadIdx.x] = thread_x;
}
if (valBlock[threadIdx.x] > valBlock[threadIdx.x+8]){
valBlock[threadIdx.x] = valBlock[threadIdx.x+8];
keyBlock[threadIdx.x] = thread_x+8; } else{
keyBlock[threadIdx.x] = thread_x;
}
if (valBlock[threadIdx.x] > valBlock[threadIdx.x+4]){
valBlock[threadIdx.x] = valBlock[threadIdx.x+4];
keyBlock[threadIdx.x] = thread_x+4; } else{
keyBlock[threadIdx.x] = thread_x;
}
if (valBlock[threadIdx.x] > valBlock[threadIdx.x+2]){
valBlock[threadIdx.x] = valBlock[threadIdx.x+2];
keyBlock[threadIdx.x] = thread_x+2; } else{
keyBlock[threadIdx.x] = thread_x;
}
if (valBlock[threadIdx.x] > valBlock[threadIdx.x+1]){
valBlock[threadIdx.x] = valBlock[threadIdx.x+1];
keyBlock[threadIdx.x] = thread_x+1; } else{
keyBlock[threadIdx.x] = thread_x;
}
#if LOC_RED_THREADS==32
}
#endif
__syncthreads();
//write key/value pair to global
if ((threadIdx.x==0)){
devReductionKeysOut[blockIdx.x]=keyBlock[threadIdx.x];
devReductionValuesOut[blockIdx.x]=valBlock[threadIdx.x];
//thread 0 of whichever block finishes last (i.e. sees all block results written) combines them and advances the wavefront radius at the winning key position
if ((devReductionKeysOut[0]==0.0f) || (devReductionKeysOut[1]==0.0f) || (devReductionKeysOut[2]==0.0f)
|| (devReductionKeysOut[3]==0.0f)){
return;
} else {
int key = sizeTheta*sizePhi -1;
int value = devReductionValuesOut[0];
if (devReductionValuesOut[1] < value ){
key=devReductionKeysOut[1];
value=devReductionValuesOut[1];
}
if (devReductionValuesOut[2] < value ){
key=devReductionKeysOut[2];
value=devReductionValuesOut[2];
}
if (devReductionValuesOut[3] < value ){
key=devReductionKeysOut[3];
value=devReductionValuesOut[3];
}
float oldRadius = devRadius[(key / sizeTheta) * sizePhi + key % sizePhi];
devRadius[(key / sizeTheta) * sizePhi + key % sizePhi] = oldRadius + 1.0f;
devTTable[(key / sizeTheta) * sizePhi + key % sizePhi + (int) oldRadius* sizePhi*sizeTheta]= value;
devReductionKeysOut[0]=0.0f;
devReductionKeysOut[1]=0.0f;
devReductionKeysOut[2]=0.0f;
devReductionKeysOut[3]=0.0f;
}
}
} //end kernel
| 36b5f61672d01f63c2d63c475a45446e95d8f0de.cu | //a reduction on a single MP with a little bit 'o ugly
//at the end to advance the wavefront
//WJB 03/11
#include <cuda.h>
#include <stdio.h>
//__constant__ int sizeTheta;
//__constant__ int sizePhi;
//
__global__ void advanceWave(float *devReductionKeysIn,
float *devReductionValuesIn,
float *devReductionKeysOut,
float *devReductionValuesOut,
float *devRadius,
float *devTTable,
int sizeTheta,
int sizePhi){
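//Block-level min-reduction: each block loads its slice of candidate
//key/value pairs into shared memory, keeps the smallest value (keyBlock
//tracks the global index it came from), and thread 0 writes the block's
//winner to devReduction*Out[blockIdx.x]. The launch appears to assume
//four blocks: once all four outputs are non-zero, the overall minimum is
//used to advance devRadius at that (theta, phi) cell and to record the
//value in devTTable, after which the outputs are reset to zero.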
const int thread_x = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ float valBlock[LOC_RED_THREADS];
__shared__ float keyBlock[LOC_RED_THREADS];
//__shared__ int flag;
valBlock[threadIdx.x] = devReductionValuesIn[thread_x];
keyBlock[threadIdx.x] = devReductionKeysIn[thread_x];
#if LOC_RED_THREADS > 511
if (threadIdx.x<256){
if (valBlock[threadIdx.x] > valBlock[threadIdx.x+256]){
valBlock[threadIdx.x] = valBlock[threadIdx.x+256];
keyBlock[threadIdx.x] = thread_x+256; } else{
keyBlock[threadIdx.x] = thread_x;
}
}
__syncthreads();
#endif
#if LOC_RED_THREADS > 255
if (threadIdx.x<128){
if (valBlock[threadIdx.x] > valBlock[threadIdx.x+128]){
valBlock[threadIdx.x] = valBlock[threadIdx.x+128];
keyBlock[threadIdx.x] = thread_x+128; } else{
keyBlock[threadIdx.x] = thread_x;
}
}
__syncthreads();
#endif
#if LOC_RED_THREADS > 127
if (threadIdx.x<64){
if (valBlock[threadIdx.x] > valBlock[threadIdx.x+64]){
valBlock[threadIdx.x] = valBlock[threadIdx.x+64];
keyBlock[threadIdx.x] = thread_x+64;
} else{
keyBlock[threadIdx.x] = thread_x;
}
}
__syncthreads();
#endif
#if LOC_RED_THREADS > 63
if (threadIdx.x<32){
if (valBlock[threadIdx.x] > valBlock[threadIdx.x+32]){
valBlock[threadIdx.x] = valBlock[threadIdx.x+32];
keyBlock[threadIdx.x] = thread_x+32; } else{
keyBlock[threadIdx.x] = thread_x;
}
}
#elif LOC_RED_THREADS==32
if (threadIdx.x<16){
#endif
if (valBlock[threadIdx.x] > valBlock[threadIdx.x+16]){
valBlock[threadIdx.x] = valBlock[threadIdx.x+16];
keyBlock[threadIdx.x] = thread_x+16; } else{
keyBlock[threadIdx.x] = thread_x;
}
if (valBlock[threadIdx.x] > valBlock[threadIdx.x+8]){
valBlock[threadIdx.x] = valBlock[threadIdx.x+8];
keyBlock[threadIdx.x] = thread_x+8; } else{
keyBlock[threadIdx.x] = thread_x;
}
if (valBlock[threadIdx.x] > valBlock[threadIdx.x+4]){
valBlock[threadIdx.x] = valBlock[threadIdx.x+4];
keyBlock[threadIdx.x] = thread_x+4; } else{
keyBlock[threadIdx.x] = thread_x;
}
if (valBlock[threadIdx.x] > valBlock[threadIdx.x+2]){
valBlock[threadIdx.x] = valBlock[threadIdx.x+2];
keyBlock[threadIdx.x] = thread_x+2; } else{
keyBlock[threadIdx.x] = thread_x;
}
if (valBlock[threadIdx.x] > valBlock[threadIdx.x+1]){
valBlock[threadIdx.x] = valBlock[threadIdx.x+1];
keyBlock[threadIdx.x] = thread_x+1; } else{
keyBlock[threadIdx.x] = thread_x;
}
#if LOC_RED_THREADS==32
}
#endif
__syncthreads();
//write key/value pair to global
if ((threadIdx.x==0)){
devReductionKeysOut[blockIdx.x]=keyBlock[threadIdx.x];
devReductionValuesOut[blockIdx.x]=valBlock[threadIdx.x];
//thread 0 of whichever block finishes last (i.e. sees all block results written) combines them and advances the wavefront radius at the winning key position
if ((devReductionKeysOut[0]==0.0f) || (devReductionKeysOut[1]==0.0f) || (devReductionKeysOut[2]==0.0f)
|| (devReductionKeysOut[3]==0.0f)){
return;
} else {
int key = sizeTheta*sizePhi -1;
int value = devReductionValuesOut[0];
if (devReductionValuesOut[1] < value ){
key=devReductionKeysOut[1];
value=devReductionValuesOut[1];
}
if (devReductionValuesOut[2] < value ){
key=devReductionKeysOut[2];
value=devReductionValuesOut[2];
}
if (devReductionValuesOut[3] < value ){
key=devReductionKeysOut[3];
value=devReductionValuesOut[3];
}
float oldRadius = devRadius[(key / sizeTheta) * sizePhi + key % sizePhi];
devRadius[(key / sizeTheta) * sizePhi + key % sizePhi] = oldRadius + 1.0f;
devTTable[(key / sizeTheta) * sizePhi + key % sizePhi + (int) oldRadius* sizePhi*sizeTheta]= value;
devReductionKeysOut[0]=0.0f;
devReductionKeysOut[1]=0.0f;
devReductionKeysOut[2]=0.0f;
devReductionKeysOut[3]=0.0f;
}
}
} //end kernel
|
af449cda61e4872dbb914bae43c950a03c947a62.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* camera.cu - Copyright 2019 Utrecht University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "noerrors.h"
// +-----------------------------------------------------------------------------+
// | RandomPointOnLens |
// | Generate a random point on the lens. LH2'19|
// +-----------------------------------------------------------------------------+
__inline __device__ float3 RandomPointOnLens( const float r0, float r1, const float3 pos, const float aperture, const float3 right, const float3 up )
{
const float blade = (int)(r0 * 9);
float r2 = (r0 - blade * (1.0f / 9.0f)) * 9.0f;
float x1, y1, x2, y2;
__sincosf( blade * PI / 4.5f, &x1, &y1 );
__sincosf( (blade + 1.0f) * PI / 4.5f, &x2, &y2 );
if ((r1 + r2) > 1) r1 = 1.0f - r1, r2 = 1.0f - r2;
const float xr = x1 * r1 + x2 * r2;
const float yr = y1 * r1 + y2 * r2;
return pos + aperture * (right * xr + up * yr);
}
// +-----------------------------------------------------------------------------+
// | generateEyeRaysKernel |
// | Generate primary rays, to be traced by Optix Prime. LH2'19|
// +-----------------------------------------------------------------------------+
__device__ void generateEyeRaysKernel( const uint rayIdx, Ray4* rayBuffer, float4* pathStateData,
const uint R0, const uint* blueNoise, const int pass,
const float3 pos, const float3 right, const float3 up, const float aperture,
const float3 p1, const int4 screenParams )
{
// get pixel coordinate
const int scrhsize = screenParams.x & 0xffff;
const int scrvsize = screenParams.x >> 16;
const uint tileIdx = rayIdx >> 8;
const uint xtiles = scrhsize / 16;
const uint tilex = tileIdx % xtiles, tiley = tileIdx / xtiles;
const uint x_in_tile = (rayIdx & 15);
const uint y_in_tile = (rayIdx & 255) >> 4;
uint x = tilex * 16 + x_in_tile, y = tiley * 16 + y_in_tile, sampleIndex = pass + y / scrvsize;
y %= scrvsize;
// get random numbers
float3 posOnPixel, posOnLens;
// depth of field camera for no filter
float r0, r1, r2, r3;
if (sampleIndex < 256)
{
r0 = blueNoiseSampler( blueNoise, x, y, sampleIndex, 0 );
r1 = blueNoiseSampler( blueNoise, x, y, sampleIndex, 1 );
r2 = blueNoiseSampler( blueNoise, x, y, sampleIndex, 2 );
r3 = blueNoiseSampler( blueNoise, x, y, sampleIndex, 3 );
}
else
{
uint seed = WangHash( rayIdx + R0 );
r0 = RandomFloat( seed ), r1 = RandomFloat( seed );
r2 = RandomFloat( seed ), r3 = RandomFloat( seed );
}
posOnPixel = p1 + ((float)x + r0) * (right / (float)scrhsize) + ((float)y + r1) * (up / (float)scrvsize);
posOnLens = RandomPointOnLens( r2, r3, pos, aperture, right, up );
const float3 rayDir = normalize( posOnPixel - posOnLens );
// initialize path state
rayBuffer[rayIdx].O4 = make_float4( posOnLens, geometryEpsilon );
rayBuffer[rayIdx].D4 = make_float4( rayDir, 1e34f );
pathStateData[rayIdx * 2 + 0] = make_float4( 1, 1, 1, __uint_as_float( ((x + (y + (sampleIndex - pass) * scrvsize) * scrhsize) << 8) + 1 /* S_SPECULAR */ ) );
pathStateData[rayIdx * 2 + 1] = make_float4( 1, 0, 0, 0 );
}
// +-----------------------------------------------------------------------------+
// | generateEyeRaysPersistent |
// | Persistent kernel for generating primary rays. LH2'19|
// +-----------------------------------------------------------------------------+
__global__ void __launch_bounds__( 256 /* max block size */, 1 /* min blocks per sm */ )
generateEyeRaysPersistent( int pathCount, Ray4* rayBuffer, float4* pathStateData,
const uint R0, const uint* blueNoise, const int pass,
const float3 pos, const float3 right, const float3 up, const float aperture, const float3 p1,
const int4 screenParams )
{
__shared__ volatile int baseIdx[32];
int lane = threadIdx.x & 31, warp = threadIdx.x >> 5;
__syncthreads();
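	// Persistent-threads work distribution: each warp repeatedly claims a
	// batch of 32 primary-ray jobs through the atomic 'generated' counter
	// and keeps looping until the whole pool of pathCount rays is done.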
while (1)
{
if (lane == 0) baseIdx[warp] = atomicAdd( &counters->generated, 32 );
int jobIndex = baseIdx[warp] + lane;
if (__all_sync( THREADMASK, jobIndex >= pathCount )) break; // need to do the path with all threads in the warp active
if (jobIndex < pathCount) generateEyeRaysKernel( jobIndex,
rayBuffer, pathStateData,
R0, blueNoise, pass,
pos, right, up, aperture, p1,
screenParams );
}
}
// +-----------------------------------------------------------------------------+
// | generateEyeRays |
// | Entry point for the persistent generateEyeRays kernel. LH2'19|
// +-----------------------------------------------------------------------------+
__host__ void generateEyeRays( int smcount, Ray4* rayBuffer, float4* pathStateData,
const uint R0, const uint* blueNoise, const int pass,
const float aperture, const float3 camPos, const float3 right, const float3 up, const float3 p1,
const int4 screenParams )
{
const int scrwidth = screenParams.x & 0xffff;
const int scrheight = screenParams.x >> 16;
const int scrspp = screenParams.y & 255;
const int pathCount = scrwidth * scrheight * scrspp;
InitCountersForExtend_Kernel << <1, 32 >> > (pathCount);
generateEyeRaysPersistent << < smcount, 256 >> > (pathCount, rayBuffer, pathStateData, R0, blueNoise, pass, camPos, right, up, aperture, p1, screenParams);
}
// EOF | af449cda61e4872dbb914bae43c950a03c947a62.cu | /* camera.cu - Copyright 2019 Utrecht University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "noerrors.h"
// +-----------------------------------------------------------------------------+
// | RandomPointOnLens |
// | Generate a random point on the lens. LH2'19|
// +-----------------------------------------------------------------------------+
__inline __device__ float3 RandomPointOnLens( const float r0, float r1, const float3 pos, const float aperture, const float3 right, const float3 up )
{
const float blade = (int)(r0 * 9);
float r2 = (r0 - blade * (1.0f / 9.0f)) * 9.0f;
float x1, y1, x2, y2;
__sincosf( blade * PI / 4.5f, &x1, &y1 );
__sincosf( (blade + 1.0f) * PI / 4.5f, &x2, &y2 );
if ((r1 + r2) > 1) r1 = 1.0f - r1, r2 = 1.0f - r2;
const float xr = x1 * r1 + x2 * r2;
const float yr = y1 * r1 + y2 * r2;
return pos + aperture * (right * xr + up * yr);
}
// +-----------------------------------------------------------------------------+
// | generateEyeRaysKernel |
// | Generate primary rays, to be traced by Optix Prime. LH2'19|
// +-----------------------------------------------------------------------------+
__device__ void generateEyeRaysKernel( const uint rayIdx, Ray4* rayBuffer, float4* pathStateData,
const uint R0, const uint* blueNoise, const int pass,
const float3 pos, const float3 right, const float3 up, const float aperture,
const float3 p1, const int4 screenParams )
{
// get pixel coordinate
const int scrhsize = screenParams.x & 0xffff;
const int scrvsize = screenParams.x >> 16;
const uint tileIdx = rayIdx >> 8;
const uint xtiles = scrhsize / 16;
const uint tilex = tileIdx % xtiles, tiley = tileIdx / xtiles;
const uint x_in_tile = (rayIdx & 15);
const uint y_in_tile = (rayIdx & 255) >> 4;
uint x = tilex * 16 + x_in_tile, y = tiley * 16 + y_in_tile, sampleIndex = pass + y / scrvsize;
y %= scrvsize;
// get random numbers
float3 posOnPixel, posOnLens;
// depth of field camera for no filter
float r0, r1, r2, r3;
if (sampleIndex < 256)
{
r0 = blueNoiseSampler( blueNoise, x, y, sampleIndex, 0 );
r1 = blueNoiseSampler( blueNoise, x, y, sampleIndex, 1 );
r2 = blueNoiseSampler( blueNoise, x, y, sampleIndex, 2 );
r3 = blueNoiseSampler( blueNoise, x, y, sampleIndex, 3 );
}
else
{
uint seed = WangHash( rayIdx + R0 );
r0 = RandomFloat( seed ), r1 = RandomFloat( seed );
r2 = RandomFloat( seed ), r3 = RandomFloat( seed );
}
posOnPixel = p1 + ((float)x + r0) * (right / (float)scrhsize) + ((float)y + r1) * (up / (float)scrvsize);
posOnLens = RandomPointOnLens( r2, r3, pos, aperture, right, up );
const float3 rayDir = normalize( posOnPixel - posOnLens );
// initialize path state
rayBuffer[rayIdx].O4 = make_float4( posOnLens, geometryEpsilon );
rayBuffer[rayIdx].D4 = make_float4( rayDir, 1e34f );
pathStateData[rayIdx * 2 + 0] = make_float4( 1, 1, 1, __uint_as_float( ((x + (y + (sampleIndex - pass) * scrvsize) * scrhsize) << 8) + 1 /* S_SPECULAR */ ) );
pathStateData[rayIdx * 2 + 1] = make_float4( 1, 0, 0, 0 );
}
// +-----------------------------------------------------------------------------+
// | generateEyeRaysPersistent |
// | Persistent kernel for generating primary rays. LH2'19|
// +-----------------------------------------------------------------------------+
__global__ void __launch_bounds__( 256 /* max block size */, 1 /* min blocks per sm */ )
generateEyeRaysPersistent( int pathCount, Ray4* rayBuffer, float4* pathStateData,
const uint R0, const uint* blueNoise, const int pass,
const float3 pos, const float3 right, const float3 up, const float aperture, const float3 p1,
const int4 screenParams )
{
__shared__ volatile int baseIdx[32];
int lane = threadIdx.x & 31, warp = threadIdx.x >> 5;
__syncthreads();
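	// Persistent-threads work distribution: each warp repeatedly claims a
	// batch of 32 primary-ray jobs through the atomic 'generated' counter
	// and keeps looping until the whole pool of pathCount rays is done.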
while (1)
{
if (lane == 0) baseIdx[warp] = atomicAdd( &counters->generated, 32 );
int jobIndex = baseIdx[warp] + lane;
if (__all_sync( THREADMASK, jobIndex >= pathCount )) break; // need to do the path with all threads in the warp active
if (jobIndex < pathCount) generateEyeRaysKernel( jobIndex,
rayBuffer, pathStateData,
R0, blueNoise, pass,
pos, right, up, aperture, p1,
screenParams );
}
}
// +-----------------------------------------------------------------------------+
// | generateEyeRays |
// | Entry point for the persistent generateEyeRays kernel. LH2'19|
// +-----------------------------------------------------------------------------+
__host__ void generateEyeRays( int smcount, Ray4* rayBuffer, float4* pathStateData,
const uint R0, const uint* blueNoise, const int pass,
const float aperture, const float3 camPos, const float3 right, const float3 up, const float3 p1,
const int4 screenParams )
{
const int scrwidth = screenParams.x & 0xffff;
const int scrheight = screenParams.x >> 16;
const int scrspp = screenParams.y & 255;
const int pathCount = scrwidth * scrheight * scrspp;
InitCountersForExtend_Kernel << <1, 32 >> > (pathCount);
generateEyeRaysPersistent << < smcount, 256 >> > (pathCount, rayBuffer, pathStateData, R0, blueNoise, pass, camPos, right, up, aperture, p1, screenParams);
}
// EOF |
b4660e57ee47d2ada8b977aa46a31c6f67b66e45.hip | // !!! This is a file automatically generated by hipify!!!
// generated by gen_batch_cuda_conv_bias_kern_impls.py
#include "../batch_conv_bias_int8_gemm_ncdiv4hw4.cuinl"
template void megdnn::cuda::batch_conv_bias::do_batch_conv_bias_int8_gemm_ncdiv4hw4<
PerChannelBiasVisitor,
IConvEpilogue<
Activation<megdnn::param_enumv::BatchConvBias::NonlineMode::IDENTITY>>>(
const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias,
IConvEpilogue<
Activation<megdnn::param_enumv::BatchConvBias::NonlineMode::IDENTITY>>
epilogue,
const ConvParam& param, float alpha, float beta, hipStream_t stream);
| b4660e57ee47d2ada8b977aa46a31c6f67b66e45.cu | // generated by gen_batch_cuda_conv_bias_kern_impls.py
#include "../batch_conv_bias_int8_gemm_ncdiv4hw4.cuinl"
template void megdnn::cuda::batch_conv_bias::do_batch_conv_bias_int8_gemm_ncdiv4hw4<
PerChannelBiasVisitor,
IConvEpilogue<
Activation<megdnn::param_enumv::BatchConvBias::NonlineMode::IDENTITY>>>(
const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias,
IConvEpilogue<
Activation<megdnn::param_enumv::BatchConvBias::NonlineMode::IDENTITY>>
epilogue,
const ConvParam& param, float alpha, float beta, cudaStream_t stream);
|
100baf0863528844a4c00e0ec96f1d98747dfca4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void ZeroMeanImpl(float* solutions, int rowSize, int matCount) {
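// Each block handles several solution rows of length rowSize (assumed to
// be a power of two). The last entry of a row is treated as zero, the
// remaining entries are summed with a shared-memory tree reduction, and
// sum / rowSize is subtracted from every position so the row sums to zero.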
const int matricesPerBlock = BLOCK_SIZE / rowSize;
const int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize;
const int tid = threadIdx.x;
const int col = threadIdx.x & (rowSize - 1);
const int inBlockOffset = threadIdx.x / rowSize;
__shared__ double beta[BLOCK_SIZE];
__shared__ double line[BLOCK_SIZE];
if (matrixIdx >= matCount) {
return;
}
solutions += matrixIdx * rowSize;
beta[tid] = col != (rowSize - 1) ? solutions[col] : 0;
line[tid] = beta[tid];
__syncthreads();
for (int s = rowSize >> 1; s > 0; s >>= 1) {
if (col < s) {
line[tid] += line[tid + s];
}
__syncthreads();
}
beta[tid] -= line[rowSize * inBlockOffset] / rowSize;
solutions[col] = beta[tid];
} | 100baf0863528844a4c00e0ec96f1d98747dfca4.cu | #include "includes.h"
__global__ void ZeroMeanImpl(float* solutions, int rowSize, int matCount) {
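// Each block handles several solution rows of length rowSize (assumed to
// be a power of two). The last entry of a row is treated as zero, the
// remaining entries are summed with a shared-memory tree reduction, and
// sum / rowSize is subtracted from every position so the row sums to zero.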
const int matricesPerBlock = BLOCK_SIZE / rowSize;
const int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize;
const int tid = threadIdx.x;
const int col = threadIdx.x & (rowSize - 1);
const int inBlockOffset = threadIdx.x / rowSize;
__shared__ double beta[BLOCK_SIZE];
__shared__ double line[BLOCK_SIZE];
if (matrixIdx >= matCount) {
return;
}
solutions += matrixIdx * rowSize;
beta[tid] = col != (rowSize - 1) ? solutions[col] : 0;
line[tid] = beta[tid];
__syncthreads();
for (int s = rowSize >> 1; s > 0; s >>= 1) {
if (col < s) {
line[tid] += line[tid + s];
}
__syncthreads();
}
beta[tid] -= line[rowSize * inBlockOffset] / rowSize;
solutions[col] = beta[tid];
} |
346c1a196d50ff6355023d8b76ca84144f28d3d9.hip | // !!! This is a file automatically generated by hipify!!!
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <pybind11/numpy.h>
#include <pybind11/pybind11.h>
#include <unistd.h>
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <fstream>
#include <iostream>
#include <random>
#include <sstream>
#include "dataloader.h"
#include "dataset.h"
#include "helpers_cuda.h"
#include "helpers_random.h"
#include "helpers_training.h"
#include "kernels_cuda.h"
#define ACCURACY_CPU_DOUBLE_CHECK
std::default_random_engine generator;
hiprandGenerator_t gen;
hipblasHandle_t handle;
using namespace bcpnn::helpers::cuda;
using namespace bcpnn::helpers::training;
using namespace bcpnn::helpers::random;
using namespace bcpnn::kernels::cuda;
using namespace bcpnn;
//#define hipMalloc hipMallocManaged
template <typename REAL>
class SupervisedInput {
public:
REAL *inputs;
REAL *labels;
size_t count;
};
template <typename REAL>
class UnsupervisedInput {
public:
REAL *inputs;
size_t count;
};
template <typename REAL>
class Allocation {
public:
virtual ~Allocation() {}
REAL *activation;
};
template <typename REAL>
class Layer {
public:
virtual void compute_batch(Allocation<REAL> *alloc,
UnsupervisedInput<REAL> *inputs) = 0;
virtual void train_batch(Allocation<REAL> *alloc,
SupervisedInput<REAL> *inputs) = 0;
virtual void train_batch(Allocation<REAL> *alloc,
UnsupervisedInput<REAL> *inputs) = 0;
virtual void train_finalize(Allocation<REAL> *alloc) = 0;
virtual Allocation<REAL> *allocate_compute(size_t maximal_batch_size) = 0;
virtual Allocation<REAL> *allocate_training(size_t maximal_batch_size) = 0;
};
template <typename REAL>
class MaskedDenseLayer;
template <typename REAL>
class MaskedDenseLayerComputeAllocation;
template <typename REAL>
class MaskedDenseLayerTrainingAllocation;
template <typename REAL>
class MaskedDenseLayer : public Layer<REAL> {
public:
MaskedDenseLayer(size_t n_inputs, size_t n_hypercolumns,
size_t n_minicolumns);
void compute_batch(Allocation<REAL> *alloc, UnsupervisedInput<REAL> *inputs);
void train_batch(Allocation<REAL> *alloc, SupervisedInput<REAL> *inputs);
void train_batch(Allocation<REAL> *alloc, UnsupervisedInput<REAL> *inputs);
void train_finalize(Allocation<REAL> *alloc);
Allocation<REAL> *allocate_compute(size_t maximal_batch_size);
Allocation<REAL> *allocate_training(size_t maximal_batch_size);
REAL *weights;
REAL *bias;
REAL taupdt;
REAL khalf;
REAL pmin;
REAL taubdt;
REAL initial_Ci;
REAL initial_Cj;
REAL initial_Cij;
size_t n_inputs_;
size_t n_outputs_;
size_t n_hypercolumns_;
size_t n_minicolumns_;
};
template <typename REAL>
class MaskedDenseLayerComputeAllocation : public Allocation<REAL> {
public:
};
template <typename REAL>
class MaskedDenseLayerTrainingAllocation : public Allocation<REAL> {
public:
MaskedDenseLayerComputeAllocation<REAL> *to_compute_allocation();
REAL *Ci;
REAL *Cj;
REAL *Cij;
REAL *kbi;
uint8_t *wmask;
size_t update_hypercolumn;
std::vector<size_t> hc_permutation;
size_t hc_pos;
};
template <typename REAL>
MaskedDenseLayer<REAL>::MaskedDenseLayer(size_t n_inputs, size_t n_hypercolumns,
size_t n_minicolumns)
: weights(NULL),
bias(NULL),
n_inputs_(n_inputs),
n_outputs_(n_hypercolumns * n_minicolumns),
n_hypercolumns_(n_hypercolumns),
n_minicolumns_(n_minicolumns) {
CUDA_CALL(
hipMalloc((void **)&weights, n_inputs_ * n_outputs_ * sizeof(REAL)));
CUDA_CALL(hipMalloc((void **)&bias, n_outputs_ * sizeof(REAL)));
CURAND_CALL(TcurandGenerateNormal<REAL>(gen, weights, n_inputs_ * n_outputs_,
0, 0.1));
CURAND_CALL(TcurandGenerateUniform<REAL>(gen, bias, n_outputs_));
cuda_scale_array<REAL>(bias, 0.1, n_outputs_);
}
template <typename REAL>
Allocation<REAL> *MaskedDenseLayer<REAL>::allocate_compute(
size_t maximal_batch_size) {
MaskedDenseLayerComputeAllocation<REAL> *alloc =
new MaskedDenseLayerComputeAllocation<REAL>;
CUDA_CALL(hipMalloc((void **)&alloc->activation,
maximal_batch_size * n_outputs_ * sizeof(REAL)));
return alloc;
}
template <typename REAL>
Allocation<REAL> *MaskedDenseLayer<REAL>::allocate_training(
size_t maximal_batch_size) {
MaskedDenseLayerTrainingAllocation<REAL> *alloc =
new MaskedDenseLayerTrainingAllocation<REAL>;
CUDA_CALL(hipMalloc((void **)&alloc->activation,
maximal_batch_size * n_outputs_ * sizeof(REAL)));
CUDA_CALL(hipMalloc((void **)&alloc->Ci, n_inputs_ * sizeof(REAL)));
CUDA_CALL(hipMalloc((void **)&alloc->Cj, n_outputs_ * sizeof(REAL)));
CUDA_CALL(
hipMalloc((void **)&alloc->Cij, n_inputs_ * n_outputs_ * sizeof(REAL)));
CUDA_CALL(hipMalloc((void **)&alloc->wmask,
n_inputs_ * n_hypercolumns_ * sizeof(uint8_t)));
CUDA_CALL(hipMalloc((void **)&alloc->kbi, n_outputs_ * sizeof(REAL)));
cuda_initialize_array<REAL>(alloc->Ci, initial_Ci, n_inputs_);
cuda_initialize_array<REAL>(alloc->Cj, initial_Cj, n_outputs_);
cuda_initialize_array<REAL>(alloc->Cij, initial_Cij, n_inputs_ * n_outputs_);
cuda_initialize_array<REAL>(alloc->kbi, 1, n_outputs_);
initialize_wmask(alloc->wmask, n_inputs_, n_hypercolumns_);
alloc->update_hypercolumn = 0;
for (size_t i = 0; i < n_hypercolumns_; ++i) {
alloc->hc_permutation.push_back(i);
}
std::shuffle(alloc->hc_permutation.begin(), alloc->hc_permutation.end(),
generator);
return alloc;
}
template <typename REAL>
void MaskedDenseLayer<REAL>::compute_batch(Allocation<REAL> *alloc_,
UnsupervisedInput<REAL> *inputs) {
MaskedDenseLayerComputeAllocation<REAL> *alloc =
(MaskedDenseLayerComputeAllocation<REAL> *)alloc_;
REAL v_one = 1;
REAL v_zero = 0;
CUBLAS_CALL(cublasgemm<REAL>(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n_outputs_,
inputs->count, n_inputs_, &v_one, this->weights,
n_outputs_, inputs->inputs, n_inputs_, &v_zero,
alloc->activation, n_outputs_));
cuda_add_bias(alloc->activation, inputs->count, n_outputs_, this->bias);
cuda_softmax(alloc->activation, inputs->count * n_hypercolumns_,
n_minicolumns_);
}
template <typename REAL>
void MaskedDenseLayer<REAL>::train_batch(Allocation<REAL> *alloc,
SupervisedInput<REAL> *inputs) {
std::cerr << "MaskedDenseLayer supervised training unimplemented"
<< std::endl;
exit(1);
/*
float v_one = 1;
float v_zero = 0;
CUBLAS_CALL(hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n_outputs_,
inputs->count, n_inputs_, &v_one, this->weights, n_outputs_, inputs->inputs,
n_inputs_, &v_zero, alloc->activation, n_outputs_));
cuda_add_bias(alloc->activation, inputs->count, n_outputs_, this->bias);
cuda_softmax(alloc->activation, inputs->count * n_hypercolumns, n_minicolumns);
cuda_update_counters(alloc->Ci, alloc->Cj, alloc->Cij, inputs->inputs,
alloc->activation, inputs->count, n_inputs_, n_outputs_, taupdt); if
(hypercolumn < n_hypercolumns) { cuda_update_weights(W1, Ci, Cj, Cij, taupdt/2,
n_inputs_, n_outputs_); cuda_update_bias_regularized(B1, kbi, Cj, taupdt/2,
khalf, pmin, taubdt, n_outputs_); cuda_update_mask(wmask, W1, Ci, Cj, Cij,
taupdt/2, n_inputs_, n_outputs_, hypercolumn, n_hypercolumns, n_minicolumns,
16); cuda_apply_mask(W1, wmask, n_inputs_, n_outputs_, n_hypercolumns,
n_minicolumns);
}
*/
}
template <typename REAL>
void MaskedDenseLayer<REAL>::train_batch(Allocation<REAL> *alloc_,
UnsupervisedInput<REAL> *inputs) {
MaskedDenseLayerTrainingAllocation<REAL> *alloc =
(MaskedDenseLayerTrainingAllocation<REAL> *)alloc_;
REAL v_one = 1;
REAL v_zero = 0;
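  // One unsupervised BCPNN step: forward pass (softmax per hypercolumn of
  // W*x + b), taupdt-smoothed update of the Ci/Cj/Cij activation and
  // co-activation counters, and recomputation of weights and regularized
  // biases from those counters. If a hypercolumn is scheduled this step,
  // its connectivity mask is re-selected before the mask is applied.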
CUBLAS_CALL(cublasgemm<REAL>(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n_outputs_,
inputs->count, n_inputs_, &v_one, this->weights,
n_outputs_, inputs->inputs, n_inputs_, &v_zero,
alloc->activation, n_outputs_));
cuda_add_bias(alloc->activation, inputs->count, n_outputs_, this->bias);
cuda_softmax(alloc->activation, inputs->count * n_hypercolumns_,
n_minicolumns_);
cuda_update_counters(alloc->Ci, alloc->Cj, alloc->Cij, inputs->inputs,
alloc->activation, inputs->count, n_inputs_, n_outputs_,
taupdt);
cuda_update_weights(this->weights, alloc->Ci, alloc->Cj, alloc->Cij,
taupdt / 2, n_inputs_, n_outputs_);
cuda_update_bias_regularized(this->bias, alloc->kbi, alloc->Cj, taupdt / 2,
khalf, pmin, taubdt, n_outputs_);
if (alloc->update_hypercolumn < n_hypercolumns_) {
cuda_update_mask(alloc->wmask, this->weights, alloc->Ci, alloc->Cj,
alloc->Cij, taupdt / 2, n_inputs_, n_outputs_,
alloc->update_hypercolumn, n_hypercolumns_, n_minicolumns_,
16);
}
cuda_apply_mask(this->weights, alloc->wmask, n_inputs_, n_outputs_,
n_hypercolumns_, n_minicolumns_);
}
template <typename REAL>
void MaskedDenseLayer<REAL>::train_finalize(Allocation<REAL> *alloc) {}
template <typename REAL>
class DenseLayer;
template <typename REAL>
class DenseLayerComputeAllocation;
template <typename REAL>
class DenseLayerTrainingAllocation;
template <typename REAL>
class DenseLayer : public Layer<REAL> {
public:
DenseLayer(size_t n_inputs, size_t n_hypercolumns, size_t n_minicolumns);
void compute_batch(Allocation<REAL> *alloc, UnsupervisedInput<REAL> *inputs);
void train_batch(Allocation<REAL> *alloc, SupervisedInput<REAL> *inputs);
void train_batch(Allocation<REAL> *alloc, UnsupervisedInput<REAL> *inputs);
void train_finalize(Allocation<REAL> *alloc);
Allocation<REAL> *allocate_compute(size_t maximal_batch_size);
Allocation<REAL> *allocate_training(size_t maximal_batch_size);
REAL *weights;
REAL *bias;
REAL taupdt;
REAL initial_Ci;
REAL initial_Cj;
REAL initial_Cij;
size_t n_inputs_;
size_t n_outputs_;
size_t n_hypercolumns_;
size_t n_minicolumns_;
};
template <typename REAL>
class DenseLayerComputeAllocation : public Allocation<REAL> {
public:
};
template <typename REAL>
class DenseLayerTrainingAllocation : public Allocation<REAL> {
public:
DenseLayerComputeAllocation<REAL> *to_compute_allocation();
REAL *Ci;
REAL *Cj;
REAL *Cij;
REAL *kbi;
uint8_t *wmask;
};
template <typename REAL>
DenseLayer<REAL>::DenseLayer(size_t n_inputs, size_t n_hypercolumns,
size_t n_minicolumns)
: weights(NULL),
bias(NULL),
n_inputs_(n_inputs),
n_outputs_(n_hypercolumns * n_minicolumns),
n_hypercolumns_(n_hypercolumns),
n_minicolumns_(n_minicolumns) {
CUDA_CALL(
hipMalloc((void **)&weights, n_inputs_ * n_outputs_ * sizeof(REAL)));
CUDA_CALL(hipMalloc((void **)&bias, n_outputs_ * sizeof(REAL)));
}
template <typename REAL>
Allocation<REAL> *DenseLayer<REAL>::allocate_compute(
size_t maximal_batch_size) {
DenseLayerComputeAllocation<REAL> *alloc =
new DenseLayerComputeAllocation<REAL>;
CUDA_CALL(hipMalloc((void **)&alloc->activation,
maximal_batch_size * n_outputs_ * sizeof(REAL)));
return alloc;
}
template <typename REAL>
Allocation<REAL> *DenseLayer<REAL>::allocate_training(
size_t maximal_batch_size) {
DenseLayerTrainingAllocation<REAL> *alloc =
new DenseLayerTrainingAllocation<REAL>;
CUDA_CALL(hipMalloc((void **)&alloc->activation,
maximal_batch_size * n_outputs_ * sizeof(REAL)));
CUDA_CALL(hipMalloc((void **)&alloc->Ci, n_inputs_ * sizeof(REAL)));
CUDA_CALL(hipMalloc((void **)&alloc->Cj, n_outputs_ * sizeof(REAL)));
CUDA_CALL(
hipMalloc((void **)&alloc->Cij, n_inputs_ * n_outputs_ * sizeof(REAL)));
cuda_initialize_array<REAL>(alloc->Ci, initial_Ci, n_inputs_);
cuda_initialize_array<REAL>(alloc->Cj, initial_Cj, n_outputs_);
cuda_initialize_array<REAL>(alloc->Cij, initial_Cij, n_inputs_ * n_outputs_);
return alloc;
}
template <typename REAL>
void DenseLayer<REAL>::compute_batch(Allocation<REAL> *alloc_,
UnsupervisedInput<REAL> *inputs) {
DenseLayerComputeAllocation<REAL> *alloc =
(DenseLayerComputeAllocation<REAL> *)alloc_;
REAL v_one = 1;
REAL v_zero = 0;
CUBLAS_CALL(cublasgemm<REAL>(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n_outputs_,
inputs->count, n_inputs_, &v_one, this->weights,
n_outputs_, inputs->inputs, n_inputs_, &v_zero,
alloc->activation, n_outputs_));
cuda_add_bias(alloc->activation, inputs->count, n_outputs_, this->bias);
cuda_softmax(alloc->activation, inputs->count * n_hypercolumns_,
n_minicolumns_);
}
template <typename REAL>
void DenseLayer<REAL>::train_batch(Allocation<REAL> *alloc_,
SupervisedInput<REAL> *inputs) {
DenseLayerTrainingAllocation<REAL> *alloc =
(DenseLayerTrainingAllocation<REAL> *)alloc_;
REAL v_one = 1;
REAL v_zero = 0;
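  // Supervised output layer: the forward pass is computed into
  // alloc->activation, but training only accumulates the input/label
  // co-activation counters here; weights and biases are derived from the
  // counters later, in train_finalize().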
CUBLAS_CALL(cublasgemm<REAL>(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n_outputs_,
inputs->count, n_inputs_, &v_one, this->weights,
n_outputs_, inputs->inputs, n_inputs_, &v_zero,
alloc->activation, n_outputs_));
cuda_add_bias(alloc->activation, inputs->count, n_outputs_, this->bias);
cuda_softmax(alloc->activation, inputs->count * n_hypercolumns_,
n_minicolumns_);
cuda_update_counters(alloc->Ci, alloc->Cj, alloc->Cij, inputs->inputs,
inputs->labels, inputs->count, n_inputs_, n_outputs_,
taupdt);
}
template <typename REAL>
void DenseLayer<REAL>::train_batch(Allocation<REAL> *alloc,
UnsupervisedInput<REAL> *inputs) {
std::cerr << "DenseLayer unsupervised training unimplemented" << std::endl;
exit(1);
#if 0
float v_one = 1;
float v_zero = 0;
CUBLAS_CALL(hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n_outputs_, inputs->count, n_inputs, &v_one, this->weights, n_outputs_, inputs->inputs, n_inputs, &v_zero, alloc->activation, n_outputs_));
cuda_add_bias(alloc->activation, inputs->count, n_outputs_, this->bias);
cuda_softmax(alloc->activation, inputs->count * n_hypercolumns, n_minicolumns);
cuda_update_counters(alloc->Ci, alloc->Cj, alloc->Cij, inputs->inputs, alloc->activation, inputs->count, n_inputs, n_outputs_, taupdt);
cuda_update_weights(this->weights, alloc->Ci, alloc->Cj, alloc->Cij, taupdt/2, n_inputs, n_outputs_);
cuda_update_bias_regularized(this->bias, this->bias, this->bias, taupdt/2, khalf, pmin, taubdt, n_outputs_);
size_t hypercolumn = n_hypercolumns;
if (hypercolumn < n_hypercolumns) {
cuda_update_mask(this->bias, this->weights, alloc->Ci, alloc->Cj, alloc->Cij, taupdt/2, n_inputs, n_outputs_, hypercolumn, n_hypercolumns, n_minicolumns, 16);
}
cuda_apply_mask(this->weights, this->weights, n_inputs, n_outputs_, n_hypercolumns, n_minicolumns);
#endif
}
template <typename REAL>
void DenseLayer<REAL>::train_finalize(Allocation<REAL> *alloc_) {
DenseLayerTrainingAllocation<REAL> *alloc =
(DenseLayerTrainingAllocation<REAL> *)alloc_;
cuda_update_weights(this->weights, alloc->Ci, alloc->Cj, alloc->Cij,
taupdt / 2, n_inputs_, n_outputs_);
cuda_update_bias(this->bias, alloc->Cj, taupdt / 2, n_outputs_);
}
template <typename REAL>
class Network {
public:
// void add_layer(Layer * layer);
void train_layer(dataloader<REAL> &loader, size_t maximal_batch_size,
size_t layer, size_t epochs);
double evaluate(dataset_t<REAL, REAL> &dataset, size_t maximal_batch_size);
std::vector<Layer<REAL> *> layers_;
};
template <typename REAL>
void Network<REAL>::train_layer(dataloader<REAL> &loader,
size_t maximal_batch_size, size_t layer,
size_t epochs) {
std::vector<Allocation<REAL> *> allocs;
for (size_t i = 0; i <= layer; ++i) {
if (i < layer) {
allocs.push_back(layers_[i]->allocate_compute(maximal_batch_size));
} else {
allocs.push_back(layers_[i]->allocate_training(maximal_batch_size));
}
}
for (size_t epoch = 0; epoch < epochs; ++epoch) {
std::pair<REAL *, REAL *> p = loader.queue_get_fresh();
size_t pos = 0;
size_t n_inputs = loader.get_dataset().rows * loader.get_dataset().cols;
size_t n_outputs = loader.get_dataset().number_of_classes;
size_t n_steps =
(loader.get_dataset().number_of_examples + maximal_batch_size - 1) /
maximal_batch_size;
for (size_t step = 0; step < n_steps; ++step) {
REAL *batch_images = p.first + (pos * n_inputs);
REAL *batch_labels = p.second + (pos * n_outputs);
size_t batch_size_step = min(
maximal_batch_size, loader.get_dataset().number_of_examples - pos);
for (size_t l = 0; l <= layer; ++l) {
MaskedDenseLayerTrainingAllocation<REAL> *dense_training_alloc =
dynamic_cast<MaskedDenseLayerTrainingAllocation<REAL> *>(allocs[l]);
MaskedDenseLayer<REAL> *dense_layer =
dynamic_cast<MaskedDenseLayer<REAL> *>(layers_[l]);
if (dense_layer != nullptr && dense_training_alloc != nullptr) {
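          // Mask-update schedule: after the first epoch, every
          // n_steps / (n_hypercolumns + 1) steps one hypercolumn index is
          // selected for a receptive-field mask update; the sentinel value
          // n_hypercolumns means "no mask update this step".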
size_t n_hypercolumns = dense_layer->n_hypercolumns_;
size_t h =
(epoch > 0 && (step % (n_steps / (n_hypercolumns + 1)) == 0))
? step / (n_steps / (n_hypercolumns + 1))
: n_hypercolumns;
dense_training_alloc->update_hypercolumn = h;
}
if (l + 1 != layers_.size()) {
UnsupervisedInput<REAL> inputs;
inputs.inputs = l == 0 ? batch_images : allocs[l - 1]->activation;
inputs.count = batch_size_step;
if (l == layer) {
layers_[l]->train_batch(allocs[l], &inputs);
} else {
layers_[l]->compute_batch(allocs[l], &inputs);
}
} else {
SupervisedInput<REAL> inputs;
inputs.inputs = l == 0 ? batch_images : allocs[l - 1]->activation;
inputs.labels = batch_labels;
inputs.count = batch_size_step;
layers_[l]->train_batch(allocs[l], &inputs);
}
}
pos += batch_size_step;
}
loader.queue_recycle(p);
if (layer == 0) {
MaskedDenseLayer<REAL> *layer =
dynamic_cast<MaskedDenseLayer<REAL> *>(layers_[0]);
MaskedDenseLayerTrainingAllocation<REAL> *alloc =
dynamic_cast<MaskedDenseLayerTrainingAllocation<REAL> *>(allocs[0]);
if (layer && alloc) {
print_wmask(alloc->wmask, 28, 28, layer->n_hypercolumns_);
}
printf("\nLayer 1/%lu - Epoch : %ld\n\n", layers_.size(), epoch);
} else {
printf("\nLayer %lu/%lu - Epoch : %ld\n\n", layer + 1, layers_.size(),
epoch);
}
}
layers_[layer]->train_finalize(allocs[layer]);
}
template <typename REAL>
double Network<REAL>::evaluate(dataset_t<REAL, REAL> &dataset,
size_t maximal_batch_size) {
std::vector<Allocation<REAL> *> allocs;
int *correct;
REAL *test_images;
REAL *test_labels;
CUDA_CALL(hipMalloc((void **)&correct, sizeof(int)));
CUDA_CALL(hipMemset(correct, 0, sizeof(int)));
#ifdef ACCURACY_CPU_DOUBLE_CHECK
// cpu_correct = 0;
#endif
size_t n_inputs = dataset.rows * dataset.cols;
size_t n_outputs = dataset.number_of_classes;
CUDA_CALL(hipMalloc((void **)&test_images,
dataset.number_of_examples * n_inputs * sizeof(REAL)));
CUDA_CALL(hipMalloc((void **)&test_labels,
dataset.number_of_examples * n_outputs * sizeof(REAL)));
CUDA_CALL(hipMemcpy(test_images, dataset.images,
dataset.number_of_examples * n_inputs * sizeof(REAL),
hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(test_labels, dataset.labels,
dataset.number_of_examples * n_outputs * sizeof(REAL),
hipMemcpyHostToDevice));
for (size_t l = 0; l < layers_.size(); ++l) {
allocs.push_back(layers_[l]->allocate_compute(maximal_batch_size));
}
size_t pos = 0;
for (size_t step = 0;
step < (dataset.number_of_examples + maximal_batch_size - 1) /
maximal_batch_size;
++step) {
REAL *batch_images = test_images + (pos * n_inputs);
REAL *batch_labels = test_labels + (pos * n_outputs);
size_t batch_size_step =
min(maximal_batch_size, dataset.number_of_examples - pos);
for (size_t l = 0; l < layers_.size(); ++l) {
UnsupervisedInput<REAL> inputs;
inputs.inputs =
l == 0 ? batch_images
: ((MaskedDenseLayerComputeAllocation<REAL> *)allocs[l - 1])
->activation;
inputs.count = batch_size_step;
layers_[l]->compute_batch(allocs[l], &inputs);
}
cuda_correct_predictions(
correct,
((DenseLayerComputeAllocation<REAL> *)allocs[layers_.size() - 1])
->activation,
batch_labels, batch_size_step, n_outputs);
pos += batch_size_step;
}
int h_correct;
CUDA_CALL(
hipMemcpy(&h_correct, correct, sizeof(int), hipMemcpyDeviceToHost));
#if 0
#ifdef ACCURACY_CPU_DOUBLE_CHECK
if (cpu_correct != h_correct) {
std::cerr << "CPU and GPU differ on number of correctly predicted images" << std::endl;
exit(1);
}
#endif
#endif
return ((double)h_correct) / dataset.number_of_examples;
}
namespace py = pybind11;
template <typename REAL>
class PyNetwork {
public:
PyNetwork() {}
void add_dense_layer(size_t n_inputs, size_t n_hypercolumns,
size_t n_minicolumns, REAL taupdt, REAL initial_Ci,
REAL initial_Cj, REAL initial_Cij) {
DenseLayer<REAL> *layer =
new DenseLayer<REAL>(n_inputs, n_hypercolumns, n_minicolumns);
layer->taupdt = taupdt;
layer->initial_Ci = initial_Ci;
layer->initial_Cj = initial_Cj;
layer->initial_Cij = initial_Cij;
network.layers_.push_back(layer);
}
void add_plastic_layer(size_t n_inputs, size_t n_hypercolumns,
size_t n_minicolumns, REAL taupdt, REAL pmin,
REAL khalf, REAL taubdt, REAL initial_Ci,
REAL initial_Cj, REAL initial_Cij) {
MaskedDenseLayer<REAL> *layer =
new MaskedDenseLayer<REAL>(n_inputs, n_hypercolumns, n_minicolumns);
layer->taupdt = taupdt;
layer->pmin = pmin;
layer->khalf = khalf;
layer->taubdt = taubdt;
layer->initial_Ci = initial_Ci;
layer->initial_Cj = initial_Cj;
layer->initial_Cij = initial_Cij;
network.layers_.push_back(layer);
}
void initiate_training(py::array_t<REAL> py_images,
py::array_t<REAL> py_labels) {
py::buffer_info images_buffer = py_images.request();
py::buffer_info labels_buffer = py_labels.request();
dataset.number_of_examples = images_buffer.shape[0];
dataset.rows = 1; // TODO: Currently only occurs as rows * cols in the code
dataset.cols = images_buffer.shape[1];
dataset.number_of_classes = labels_buffer.shape[1];
dataset.one_hot_label = true;
dataset.images =
new REAL[dataset.number_of_examples * dataset.rows * dataset.cols];
dataset.labels =
new REAL[dataset.number_of_examples * dataset.number_of_classes];
memcpy(dataset.images, images_buffer.ptr,
dataset.number_of_examples * dataset.rows * dataset.cols *
sizeof(REAL));
memcpy(
dataset.labels, labels_buffer.ptr,
dataset.number_of_examples * dataset.number_of_classes * sizeof(REAL));
loader = new dataloader<REAL>(dataset, 8, 2);
}
void train_layer(size_t maximal_batch_size, size_t layer, size_t epochs) {
network.train_layer(*loader, maximal_batch_size, layer, epochs);
}
void training_done() {
loader->stop();
delete loader;
loader = nullptr;
delete[] dataset.images;
delete[] dataset.labels;
}
double evaluate(py::array_t<REAL> py_images, py::array_t<REAL> py_labels,
size_t batch_size) {
py::buffer_info images_buffer = py_images.request();
py::buffer_info labels_buffer = py_labels.request();
dataset_t<REAL, REAL> testset;
testset.number_of_examples = images_buffer.shape[0];
testset.rows = 1; // TODO: Currently only occurs as rows * cols in the code
testset.cols = images_buffer.shape[1];
testset.number_of_classes = labels_buffer.shape[1];
testset.one_hot_label = true;
testset.images =
new REAL[testset.number_of_examples * testset.rows * testset.cols];
testset.labels =
new REAL[testset.number_of_examples * testset.number_of_classes];
memcpy(testset.images, images_buffer.ptr,
testset.number_of_examples * testset.rows * testset.cols *
sizeof(REAL));
memcpy(
testset.labels, labels_buffer.ptr,
testset.number_of_examples * testset.number_of_classes * sizeof(REAL));
return network.evaluate(testset, batch_size);
}
Network<REAL> network;
dataset_t<REAL, REAL> dataset;
dataloader<REAL> *loader;
};
PYBIND11_MODULE(_bcpnn_backend_full_cuda_internals, m) {
m.def("initialize", []() {
seed_generator(generator);
CUBLAS_CALL(hipblasCreate(&handle));
CURAND_CALL(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT));
});
py::class_<PyNetwork<float>>(m, "PyNetwork_float32")
.def(py::init<>())
.def("add_dense_layer", &PyNetwork<float>::add_dense_layer)
.def("add_plastic_layer", &PyNetwork<float>::add_plastic_layer)
.def("initiate_training", &PyNetwork<float>::initiate_training)
.def("train_layer", &PyNetwork<float>::train_layer)
.def("training_done", &PyNetwork<float>::training_done)
.def("evaluate", &PyNetwork<float>::evaluate);
py::class_<PyNetwork<double>>(m, "PyNetwork_float64")
.def(py::init<>())
.def("add_dense_layer", &PyNetwork<double>::add_dense_layer)
.def("add_plastic_layer", &PyNetwork<double>::add_plastic_layer)
.def("initiate_training", &PyNetwork<double>::initiate_training)
.def("train_layer", &PyNetwork<double>::train_layer)
.def("training_done", &PyNetwork<double>::training_done)
.def("evaluate", &PyNetwork<double>::evaluate);
}
| 346c1a196d50ff6355023d8b76ca84144f28d3d9.cu | #include <cublas_v2.h>
#include <cuda.h>
#include <curand.h>
#include <pybind11/numpy.h>
#include <pybind11/pybind11.h>
#include <unistd.h>
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <fstream>
#include <iostream>
#include <random>
#include <sstream>
#include "dataloader.h"
#include "dataset.h"
#include "helpers_cuda.h"
#include "helpers_random.h"
#include "helpers_training.h"
#include "kernels_cuda.h"
#define ACCURACY_CPU_DOUBLE_CHECK
std::default_random_engine generator;
curandGenerator_t gen;
cublasHandle_t handle;
using namespace bcpnn::helpers::cuda;
using namespace bcpnn::helpers::training;
using namespace bcpnn::helpers::random;
using namespace bcpnn::kernels::cuda;
using namespace bcpnn;
//#define cudaMalloc cudaMallocManaged
template <typename REAL>
class SupervisedInput {
public:
REAL *inputs;
REAL *labels;
size_t count;
};
template <typename REAL>
class UnsupervisedInput {
public:
REAL *inputs;
size_t count;
};
template <typename REAL>
class Allocation {
public:
virtual ~Allocation() {}
REAL *activation;
};
template <typename REAL>
class Layer {
public:
virtual void compute_batch(Allocation<REAL> *alloc,
UnsupervisedInput<REAL> *inputs) = 0;
virtual void train_batch(Allocation<REAL> *alloc,
SupervisedInput<REAL> *inputs) = 0;
virtual void train_batch(Allocation<REAL> *alloc,
UnsupervisedInput<REAL> *inputs) = 0;
virtual void train_finalize(Allocation<REAL> *alloc) = 0;
virtual Allocation<REAL> *allocate_compute(size_t maximal_batch_size) = 0;
virtual Allocation<REAL> *allocate_training(size_t maximal_batch_size) = 0;
};
template <typename REAL>
class MaskedDenseLayer;
template <typename REAL>
class MaskedDenseLayerComputeAllocation;
template <typename REAL>
class MaskedDenseLayerTrainingAllocation;
template <typename REAL>
class MaskedDenseLayer : public Layer<REAL> {
public:
MaskedDenseLayer(size_t n_inputs, size_t n_hypercolumns,
size_t n_minicolumns);
void compute_batch(Allocation<REAL> *alloc, UnsupervisedInput<REAL> *inputs);
void train_batch(Allocation<REAL> *alloc, SupervisedInput<REAL> *inputs);
void train_batch(Allocation<REAL> *alloc, UnsupervisedInput<REAL> *inputs);
void train_finalize(Allocation<REAL> *alloc);
Allocation<REAL> *allocate_compute(size_t maximal_batch_size);
Allocation<REAL> *allocate_training(size_t maximal_batch_size);
REAL *weights;
REAL *bias;
REAL taupdt;
REAL khalf;
REAL pmin;
REAL taubdt;
REAL initial_Ci;
REAL initial_Cj;
REAL initial_Cij;
size_t n_inputs_;
size_t n_outputs_;
size_t n_hypercolumns_;
size_t n_minicolumns_;
};
template <typename REAL>
class MaskedDenseLayerComputeAllocation : public Allocation<REAL> {
public:
};
template <typename REAL>
class MaskedDenseLayerTrainingAllocation : public Allocation<REAL> {
public:
MaskedDenseLayerComputeAllocation<REAL> *to_compute_allocation();
REAL *Ci;
REAL *Cj;
REAL *Cij;
REAL *kbi;
uint8_t *wmask;
size_t update_hypercolumn;
std::vector<size_t> hc_permutation;
size_t hc_pos;
};
template <typename REAL>
MaskedDenseLayer<REAL>::MaskedDenseLayer(size_t n_inputs, size_t n_hypercolumns,
size_t n_minicolumns)
: weights(NULL),
bias(NULL),
n_inputs_(n_inputs),
n_outputs_(n_hypercolumns * n_minicolumns),
n_hypercolumns_(n_hypercolumns),
n_minicolumns_(n_minicolumns) {
CUDA_CALL(
cudaMalloc((void **)&weights, n_inputs_ * n_outputs_ * sizeof(REAL)));
CUDA_CALL(cudaMalloc((void **)&bias, n_outputs_ * sizeof(REAL)));
CURAND_CALL(TcurandGenerateNormal<REAL>(gen, weights, n_inputs_ * n_outputs_,
0, 0.1));
CURAND_CALL(TcurandGenerateUniform<REAL>(gen, bias, n_outputs_));
cuda_scale_array<REAL>(bias, 0.1, n_outputs_);
}
template <typename REAL>
Allocation<REAL> *MaskedDenseLayer<REAL>::allocate_compute(
size_t maximal_batch_size) {
MaskedDenseLayerComputeAllocation<REAL> *alloc =
new MaskedDenseLayerComputeAllocation<REAL>;
CUDA_CALL(cudaMalloc((void **)&alloc->activation,
maximal_batch_size * n_outputs_ * sizeof(REAL)));
return alloc;
}
template <typename REAL>
Allocation<REAL> *MaskedDenseLayer<REAL>::allocate_training(
size_t maximal_batch_size) {
MaskedDenseLayerTrainingAllocation<REAL> *alloc =
new MaskedDenseLayerTrainingAllocation<REAL>;
CUDA_CALL(cudaMalloc((void **)&alloc->activation,
maximal_batch_size * n_outputs_ * sizeof(REAL)));
CUDA_CALL(cudaMalloc((void **)&alloc->Ci, n_inputs_ * sizeof(REAL)));
CUDA_CALL(cudaMalloc((void **)&alloc->Cj, n_outputs_ * sizeof(REAL)));
CUDA_CALL(
cudaMalloc((void **)&alloc->Cij, n_inputs_ * n_outputs_ * sizeof(REAL)));
CUDA_CALL(cudaMalloc((void **)&alloc->wmask,
n_inputs_ * n_hypercolumns_ * sizeof(uint8_t)));
CUDA_CALL(cudaMalloc((void **)&alloc->kbi, n_outputs_ * sizeof(REAL)));
cuda_initialize_array<REAL>(alloc->Ci, initial_Ci, n_inputs_);
cuda_initialize_array<REAL>(alloc->Cj, initial_Cj, n_outputs_);
cuda_initialize_array<REAL>(alloc->Cij, initial_Cij, n_inputs_ * n_outputs_);
cuda_initialize_array<REAL>(alloc->kbi, 1, n_outputs_);
initialize_wmask(alloc->wmask, n_inputs_, n_hypercolumns_);
alloc->update_hypercolumn = 0;
for (size_t i = 0; i < n_hypercolumns_; ++i) {
alloc->hc_permutation.push_back(i);
}
std::shuffle(alloc->hc_permutation.begin(), alloc->hc_permutation.end(),
generator);
return alloc;
}
template <typename REAL>
void MaskedDenseLayer<REAL>::compute_batch(Allocation<REAL> *alloc_,
UnsupervisedInput<REAL> *inputs) {
MaskedDenseLayerComputeAllocation<REAL> *alloc =
(MaskedDenseLayerComputeAllocation<REAL> *)alloc_;
REAL v_one = 1;
REAL v_zero = 0;
CUBLAS_CALL(cublasgemm<REAL>(handle, CUBLAS_OP_N, CUBLAS_OP_N, n_outputs_,
inputs->count, n_inputs_, &v_one, this->weights,
n_outputs_, inputs->inputs, n_inputs_, &v_zero,
alloc->activation, n_outputs_));
cuda_add_bias(alloc->activation, inputs->count, n_outputs_, this->bias);
cuda_softmax(alloc->activation, inputs->count * n_hypercolumns_,
n_minicolumns_);
}
template <typename REAL>
void MaskedDenseLayer<REAL>::train_batch(Allocation<REAL> *alloc,
SupervisedInput<REAL> *inputs) {
std::cerr << "MaskedDenseLayer supervised training unimplemented"
<< std::endl;
exit(1);
/*
float v_one = 1;
float v_zero = 0;
CUBLAS_CALL(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n_outputs_,
inputs->count, n_inputs_, &v_one, this->weights, n_outputs_, inputs->inputs,
n_inputs_, &v_zero, alloc->activation, n_outputs_));
cuda_add_bias(alloc->activation, inputs->count, n_outputs_, this->bias);
cuda_softmax(alloc->activation, inputs->count * n_hypercolumns, n_minicolumns);
cuda_update_counters(alloc->Ci, alloc->Cj, alloc->Cij, inputs->inputs,
alloc->activation, inputs->count, n_inputs_, n_outputs_, taupdt); if
(hypercolumn < n_hypercolumns) { cuda_update_weights(W1, Ci, Cj, Cij, taupdt/2,
n_inputs_, n_outputs_); cuda_update_bias_regularized(B1, kbi, Cj, taupdt/2,
khalf, pmin, taubdt, n_outputs_); cuda_update_mask(wmask, W1, Ci, Cj, Cij,
taupdt/2, n_inputs_, n_outputs_, hypercolumn, n_hypercolumns, n_minicolumns,
16); cuda_apply_mask(W1, wmask, n_inputs_, n_outputs_, n_hypercolumns,
n_minicolumns);
}
*/
}
template <typename REAL>
void MaskedDenseLayer<REAL>::train_batch(Allocation<REAL> *alloc_,
UnsupervisedInput<REAL> *inputs) {
MaskedDenseLayerTrainingAllocation<REAL> *alloc =
(MaskedDenseLayerTrainingAllocation<REAL> *)alloc_;
REAL v_one = 1;
REAL v_zero = 0;
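  // One unsupervised BCPNN step: forward pass (softmax per hypercolumn of
  // W*x + b), taupdt-smoothed update of the Ci/Cj/Cij activation and
  // co-activation counters, and recomputation of weights and regularized
  // biases from those counters. If a hypercolumn is scheduled this step,
  // its connectivity mask is re-selected before the mask is applied.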
CUBLAS_CALL(cublasgemm<REAL>(handle, CUBLAS_OP_N, CUBLAS_OP_N, n_outputs_,
inputs->count, n_inputs_, &v_one, this->weights,
n_outputs_, inputs->inputs, n_inputs_, &v_zero,
alloc->activation, n_outputs_));
cuda_add_bias(alloc->activation, inputs->count, n_outputs_, this->bias);
cuda_softmax(alloc->activation, inputs->count * n_hypercolumns_,
n_minicolumns_);
cuda_update_counters(alloc->Ci, alloc->Cj, alloc->Cij, inputs->inputs,
alloc->activation, inputs->count, n_inputs_, n_outputs_,
taupdt);
cuda_update_weights(this->weights, alloc->Ci, alloc->Cj, alloc->Cij,
taupdt / 2, n_inputs_, n_outputs_);
cuda_update_bias_regularized(this->bias, alloc->kbi, alloc->Cj, taupdt / 2,
khalf, pmin, taubdt, n_outputs_);
if (alloc->update_hypercolumn < n_hypercolumns_) {
cuda_update_mask(alloc->wmask, this->weights, alloc->Ci, alloc->Cj,
alloc->Cij, taupdt / 2, n_inputs_, n_outputs_,
alloc->update_hypercolumn, n_hypercolumns_, n_minicolumns_,
16);
}
cuda_apply_mask(this->weights, alloc->wmask, n_inputs_, n_outputs_,
n_hypercolumns_, n_minicolumns_);
}
template <typename REAL>
void MaskedDenseLayer<REAL>::train_finalize(Allocation<REAL> *alloc) {}
template <typename REAL>
class DenseLayer;
template <typename REAL>
class DenseLayerComputeAllocation;
template <typename REAL>
class DenseLayerTrainingAllocation;
template <typename REAL>
class DenseLayer : public Layer<REAL> {
public:
DenseLayer(size_t n_inputs, size_t n_hypercolumns, size_t n_minicolumns);
void compute_batch(Allocation<REAL> *alloc, UnsupervisedInput<REAL> *inputs);
void train_batch(Allocation<REAL> *alloc, SupervisedInput<REAL> *inputs);
void train_batch(Allocation<REAL> *alloc, UnsupervisedInput<REAL> *inputs);
void train_finalize(Allocation<REAL> *alloc);
Allocation<REAL> *allocate_compute(size_t maximal_batch_size);
Allocation<REAL> *allocate_training(size_t maximal_batch_size);
REAL *weights;
REAL *bias;
REAL taupdt;
REAL initial_Ci;
REAL initial_Cj;
REAL initial_Cij;
size_t n_inputs_;
size_t n_outputs_;
size_t n_hypercolumns_;
size_t n_minicolumns_;
};
template <typename REAL>
class DenseLayerComputeAllocation : public Allocation<REAL> {
public:
};
template <typename REAL>
class DenseLayerTrainingAllocation : public Allocation<REAL> {
public:
DenseLayerComputeAllocation<REAL> *to_compute_allocation();
REAL *Ci;
REAL *Cj;
REAL *Cij;
REAL *kbi;
uint8_t *wmask;
};
template <typename REAL>
DenseLayer<REAL>::DenseLayer(size_t n_inputs, size_t n_hypercolumns,
size_t n_minicolumns)
: weights(NULL),
bias(NULL),
n_inputs_(n_inputs),
n_outputs_(n_hypercolumns * n_minicolumns),
n_hypercolumns_(n_hypercolumns),
n_minicolumns_(n_minicolumns) {
CUDA_CALL(
cudaMalloc((void **)&weights, n_inputs_ * n_outputs_ * sizeof(REAL)));
CUDA_CALL(cudaMalloc((void **)&bias, n_outputs_ * sizeof(REAL)));
}
template <typename REAL>
Allocation<REAL> *DenseLayer<REAL>::allocate_compute(
size_t maximal_batch_size) {
DenseLayerComputeAllocation<REAL> *alloc =
new DenseLayerComputeAllocation<REAL>;
CUDA_CALL(cudaMalloc((void **)&alloc->activation,
maximal_batch_size * n_outputs_ * sizeof(REAL)));
return alloc;
}
template <typename REAL>
Allocation<REAL> *DenseLayer<REAL>::allocate_training(
size_t maximal_batch_size) {
DenseLayerTrainingAllocation<REAL> *alloc =
new DenseLayerTrainingAllocation<REAL>;
CUDA_CALL(cudaMalloc((void **)&alloc->activation,
maximal_batch_size * n_outputs_ * sizeof(REAL)));
CUDA_CALL(cudaMalloc((void **)&alloc->Ci, n_inputs_ * sizeof(REAL)));
CUDA_CALL(cudaMalloc((void **)&alloc->Cj, n_outputs_ * sizeof(REAL)));
CUDA_CALL(
cudaMalloc((void **)&alloc->Cij, n_inputs_ * n_outputs_ * sizeof(REAL)));
cuda_initialize_array<REAL>(alloc->Ci, initial_Ci, n_inputs_);
cuda_initialize_array<REAL>(alloc->Cj, initial_Cj, n_outputs_);
cuda_initialize_array<REAL>(alloc->Cij, initial_Cij, n_inputs_ * n_outputs_);
return alloc;
}
template <typename REAL>
void DenseLayer<REAL>::compute_batch(Allocation<REAL> *alloc_,
UnsupervisedInput<REAL> *inputs) {
DenseLayerComputeAllocation<REAL> *alloc =
(DenseLayerComputeAllocation<REAL> *)alloc_;
REAL v_one = 1;
REAL v_zero = 0;
CUBLAS_CALL(cublasgemm<REAL>(handle, CUBLAS_OP_N, CUBLAS_OP_N, n_outputs_,
inputs->count, n_inputs_, &v_one, this->weights,
n_outputs_, inputs->inputs, n_inputs_, &v_zero,
alloc->activation, n_outputs_));
cuda_add_bias(alloc->activation, inputs->count, n_outputs_, this->bias);
cuda_softmax(alloc->activation, inputs->count * n_hypercolumns_,
n_minicolumns_);
}
template <typename REAL>
void DenseLayer<REAL>::train_batch(Allocation<REAL> *alloc_,
SupervisedInput<REAL> *inputs) {
DenseLayerTrainingAllocation<REAL> *alloc =
(DenseLayerTrainingAllocation<REAL> *)alloc_;
REAL v_one = 1;
REAL v_zero = 0;
CUBLAS_CALL(cublasgemm<REAL>(handle, CUBLAS_OP_N, CUBLAS_OP_N, n_outputs_,
inputs->count, n_inputs_, &v_one, this->weights,
n_outputs_, inputs->inputs, n_inputs_, &v_zero,
alloc->activation, n_outputs_));
cuda_add_bias(alloc->activation, inputs->count, n_outputs_, this->bias);
cuda_softmax(alloc->activation, inputs->count * n_hypercolumns_,
n_minicolumns_);
cuda_update_counters(alloc->Ci, alloc->Cj, alloc->Cij, inputs->inputs,
inputs->labels, inputs->count, n_inputs_, n_outputs_,
taupdt);
}
template <typename REAL>
void DenseLayer<REAL>::train_batch(Allocation<REAL> *alloc,
UnsupervisedInput<REAL> *inputs) {
std::cerr << "DenseLayer unsupervised training unimplemented" << std::endl;
exit(1);
#if 0
float v_one = 1;
float v_zero = 0;
CUBLAS_CALL(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n_outputs_, inputs->count, n_inputs, &v_one, this->weights, n_outputs_, inputs->inputs, n_inputs, &v_zero, alloc->activation, n_outputs_));
cuda_add_bias(alloc->activation, inputs->count, n_outputs_, this->bias);
cuda_softmax(alloc->activation, inputs->count * n_hypercolumns, n_minicolumns);
cuda_update_counters(alloc->Ci, alloc->Cj, alloc->Cij, inputs->inputs, alloc->activation, inputs->count, n_inputs, n_outputs_, taupdt);
cuda_update_weights(this->weights, alloc->Ci, alloc->Cj, alloc->Cij, taupdt/2, n_inputs, n_outputs_);
cuda_update_bias_regularized(this->bias, this->bias, this->bias, taupdt/2, khalf, pmin, taubdt, n_outputs_);
size_t hypercolumn = n_hypercolumns;
if (hypercolumn < n_hypercolumns) {
cuda_update_mask(this->bias, this->weights, alloc->Ci, alloc->Cj, alloc->Cij, taupdt/2, n_inputs, n_outputs_, hypercolumn, n_hypercolumns, n_minicolumns, 16);
}
cuda_apply_mask(this->weights, this->weights, n_inputs, n_outputs_, n_hypercolumns, n_minicolumns);
#endif
}
template <typename REAL>
void DenseLayer<REAL>::train_finalize(Allocation<REAL> *alloc_) {
DenseLayerTrainingAllocation<REAL> *alloc =
(DenseLayerTrainingAllocation<REAL> *)alloc_;
cuda_update_weights(this->weights, alloc->Ci, alloc->Cj, alloc->Cij,
taupdt / 2, n_inputs_, n_outputs_);
cuda_update_bias(this->bias, alloc->Cj, taupdt / 2, n_outputs_);
}
template <typename REAL>
class Network {
public:
// void add_layer(Layer * layer);
void train_layer(dataloader<REAL> &loader, size_t maximal_batch_size,
size_t layer, size_t epochs);
double evaluate(dataset_t<REAL, REAL> &dataset, size_t maximal_batch_size);
std::vector<Layer<REAL> *> layers_;
};
template <typename REAL>
void Network<REAL>::train_layer(dataloader<REAL> &loader,
size_t maximal_batch_size, size_t layer,
size_t epochs) {
std::vector<Allocation<REAL> *> allocs;
for (size_t i = 0; i <= layer; ++i) {
if (i < layer) {
allocs.push_back(layers_[i]->allocate_compute(maximal_batch_size));
} else {
allocs.push_back(layers_[i]->allocate_training(maximal_batch_size));
}
}
for (size_t epoch = 0; epoch < epochs; ++epoch) {
std::pair<REAL *, REAL *> p = loader.queue_get_fresh();
size_t pos = 0;
size_t n_inputs = loader.get_dataset().rows * loader.get_dataset().cols;
size_t n_outputs = loader.get_dataset().number_of_classes;
size_t n_steps =
(loader.get_dataset().number_of_examples + maximal_batch_size - 1) /
maximal_batch_size;
for (size_t step = 0; step < n_steps; ++step) {
REAL *batch_images = p.first + (pos * n_inputs);
REAL *batch_labels = p.second + (pos * n_outputs);
size_t batch_size_step = min(
maximal_batch_size, loader.get_dataset().number_of_examples - pos);
for (size_t l = 0; l <= layer; ++l) {
MaskedDenseLayerTrainingAllocation<REAL> *dense_training_alloc =
dynamic_cast<MaskedDenseLayerTrainingAllocation<REAL> *>(allocs[l]);
MaskedDenseLayer<REAL> *dense_layer =
dynamic_cast<MaskedDenseLayer<REAL> *>(layers_[l]);
if (dense_layer != nullptr && dense_training_alloc != nullptr) {
size_t n_hypercolumns = dense_layer->n_hypercolumns_;
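        // Choose which hypercolumn's mask to update this step: from the second
        // epoch on, steps spaced n_steps / (n_hypercolumns + 1) apart cycle
        // through hypercolumn indices; every other step (and all of epoch 0)
        // yields n_hypercolumns, which train_batch treats as "no mask update".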
size_t h =
(epoch > 0 && (step % (n_steps / (n_hypercolumns + 1)) == 0))
? step / (n_steps / (n_hypercolumns + 1))
: n_hypercolumns;
dense_training_alloc->update_hypercolumn = h;
}
if (l + 1 != layers_.size()) {
UnsupervisedInput<REAL> inputs;
inputs.inputs = l == 0 ? batch_images : allocs[l - 1]->activation;
inputs.count = batch_size_step;
if (l == layer) {
layers_[l]->train_batch(allocs[l], &inputs);
} else {
layers_[l]->compute_batch(allocs[l], &inputs);
}
} else {
SupervisedInput<REAL> inputs;
inputs.inputs = l == 0 ? batch_images : allocs[l - 1]->activation;
inputs.labels = batch_labels;
inputs.count = batch_size_step;
layers_[l]->train_batch(allocs[l], &inputs);
}
}
pos += batch_size_step;
}
loader.queue_recycle(p);
if (layer == 0) {
MaskedDenseLayer<REAL> *layer =
dynamic_cast<MaskedDenseLayer<REAL> *>(layers_[0]);
MaskedDenseLayerTrainingAllocation<REAL> *alloc =
dynamic_cast<MaskedDenseLayerTrainingAllocation<REAL> *>(allocs[0]);
if (layer && alloc) {
print_wmask(alloc->wmask, 28, 28, layer->n_hypercolumns_);
}
printf("\nLayer 1/%lu - Epoch : %ld\n\n", layers_.size(), epoch);
} else {
printf("\nLayer %lu/%lu - Epoch : %ld\n\n", layer + 1, layers_.size(),
epoch);
}
}
layers_[layer]->train_finalize(allocs[layer]);
}
template <typename REAL>
double Network<REAL>::evaluate(dataset_t<REAL, REAL> &dataset,
size_t maximal_batch_size) {
std::vector<Allocation<REAL> *> allocs;
int *correct;
REAL *test_images;
REAL *test_labels;
CUDA_CALL(cudaMalloc((void **)&correct, sizeof(int)));
CUDA_CALL(cudaMemset(correct, 0, sizeof(int)));
#ifdef ACCURACY_CPU_DOUBLE_CHECK
// cpu_correct = 0;
#endif
size_t n_inputs = dataset.rows * dataset.cols;
size_t n_outputs = dataset.number_of_classes;
CUDA_CALL(cudaMalloc((void **)&test_images,
dataset.number_of_examples * n_inputs * sizeof(REAL)));
CUDA_CALL(cudaMalloc((void **)&test_labels,
dataset.number_of_examples * n_outputs * sizeof(REAL)));
CUDA_CALL(cudaMemcpy(test_images, dataset.images,
dataset.number_of_examples * n_inputs * sizeof(REAL),
cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(test_labels, dataset.labels,
dataset.number_of_examples * n_outputs * sizeof(REAL),
cudaMemcpyHostToDevice));
for (size_t l = 0; l < layers_.size(); ++l) {
allocs.push_back(layers_[l]->allocate_compute(maximal_batch_size));
}
size_t pos = 0;
for (size_t step = 0;
step < (dataset.number_of_examples + maximal_batch_size - 1) /
maximal_batch_size;
++step) {
REAL *batch_images = test_images + (pos * n_inputs);
REAL *batch_labels = test_labels + (pos * n_outputs);
size_t batch_size_step =
min(maximal_batch_size, dataset.number_of_examples - pos);
for (size_t l = 0; l < layers_.size(); ++l) {
UnsupervisedInput<REAL> inputs;
inputs.inputs =
l == 0 ? batch_images
: ((MaskedDenseLayerComputeAllocation<REAL> *)allocs[l - 1])
->activation;
inputs.count = batch_size_step;
layers_[l]->compute_batch(allocs[l], &inputs);
}
cuda_correct_predictions(
correct,
((DenseLayerComputeAllocation<REAL> *)allocs[layers_.size() - 1])
->activation,
batch_labels, batch_size_step, n_outputs);
pos += batch_size_step;
}
int h_correct;
CUDA_CALL(
cudaMemcpy(&h_correct, correct, sizeof(int), cudaMemcpyDeviceToHost));
#if 0
#ifdef ACCURACY_CPU_DOUBLE_CHECK
if (cpu_correct != h_correct) {
std::cerr << "CPU and GPU differ on number of correctly predicted images" << std::endl;
exit(1);
}
#endif
#endif
return ((double)h_correct) / dataset.number_of_examples;
}
namespace py = pybind11;
template <typename REAL>
class PyNetwork {
public:
PyNetwork() {}
void add_dense_layer(size_t n_inputs, size_t n_hypercolumns,
size_t n_minicolumns, REAL taupdt, REAL initial_Ci,
REAL initial_Cj, REAL initial_Cij) {
DenseLayer<REAL> *layer =
new DenseLayer<REAL>(n_inputs, n_hypercolumns, n_minicolumns);
layer->taupdt = taupdt;
layer->initial_Ci = initial_Ci;
layer->initial_Cj = initial_Cj;
layer->initial_Cij = initial_Cij;
network.layers_.push_back(layer);
}
void add_plastic_layer(size_t n_inputs, size_t n_hypercolumns,
size_t n_minicolumns, REAL taupdt, REAL pmin,
REAL khalf, REAL taubdt, REAL initial_Ci,
REAL initial_Cj, REAL initial_Cij) {
MaskedDenseLayer<REAL> *layer =
new MaskedDenseLayer<REAL>(n_inputs, n_hypercolumns, n_minicolumns);
layer->taupdt = taupdt;
layer->pmin = pmin;
layer->khalf = khalf;
layer->taubdt = taubdt;
layer->initial_Ci = initial_Ci;
layer->initial_Cj = initial_Cj;
layer->initial_Cij = initial_Cij;
network.layers_.push_back(layer);
}
void initiate_training(py::array_t<REAL> py_images,
py::array_t<REAL> py_labels) {
py::buffer_info images_buffer = py_images.request();
py::buffer_info labels_buffer = py_labels.request();
dataset.number_of_examples = images_buffer.shape[0];
dataset.rows = 1; // TODO: Currently only occurs as rows * cols in the code
dataset.cols = images_buffer.shape[1];
dataset.number_of_classes = labels_buffer.shape[1];
dataset.one_hot_label = true;
dataset.images =
new REAL[dataset.number_of_examples * dataset.rows * dataset.cols];
dataset.labels =
new REAL[dataset.number_of_examples * dataset.number_of_classes];
memcpy(dataset.images, images_buffer.ptr,
dataset.number_of_examples * dataset.rows * dataset.cols *
sizeof(REAL));
memcpy(
dataset.labels, labels_buffer.ptr,
dataset.number_of_examples * dataset.number_of_classes * sizeof(REAL));
loader = new dataloader<REAL>(dataset, 8, 2);
}
void train_layer(size_t maximal_batch_size, size_t layer, size_t epochs) {
network.train_layer(*loader, maximal_batch_size, layer, epochs);
}
void training_done() {
loader->stop();
delete loader;
loader = nullptr;
delete[] dataset.images;
delete[] dataset.labels;
}
double evaluate(py::array_t<REAL> py_images, py::array_t<REAL> py_labels,
size_t batch_size) {
py::buffer_info images_buffer = py_images.request();
py::buffer_info labels_buffer = py_labels.request();
dataset_t<REAL, REAL> testset;
testset.number_of_examples = images_buffer.shape[0];
testset.rows = 1; // TODO: Currently only occurs as rows * cols in the code
testset.cols = images_buffer.shape[1];
testset.number_of_classes = labels_buffer.shape[1];
testset.one_hot_label = true;
testset.images =
new REAL[testset.number_of_examples * testset.rows * testset.cols];
testset.labels =
new REAL[testset.number_of_examples * testset.number_of_classes];
memcpy(testset.images, images_buffer.ptr,
testset.number_of_examples * testset.rows * testset.cols *
sizeof(REAL));
memcpy(
testset.labels, labels_buffer.ptr,
testset.number_of_examples * testset.number_of_classes * sizeof(REAL));
return network.evaluate(testset, batch_size);
}
Network<REAL> network;
dataset_t<REAL, REAL> dataset;
dataloader<REAL> *loader;
};
PYBIND11_MODULE(_bcpnn_backend_full_cuda_internals, m) {
m.def("initialize", []() {
seed_generator(generator);
CUBLAS_CALL(cublasCreate(&handle));
CURAND_CALL(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
});
py::class_<PyNetwork<float>>(m, "PyNetwork_float32")
.def(py::init<>())
.def("add_dense_layer", &PyNetwork<float>::add_dense_layer)
.def("add_plastic_layer", &PyNetwork<float>::add_plastic_layer)
.def("initiate_training", &PyNetwork<float>::initiate_training)
.def("train_layer", &PyNetwork<float>::train_layer)
.def("training_done", &PyNetwork<float>::training_done)
.def("evaluate", &PyNetwork<float>::evaluate);
py::class_<PyNetwork<double>>(m, "PyNetwork_float64")
.def(py::init<>())
.def("add_dense_layer", &PyNetwork<double>::add_dense_layer)
.def("add_plastic_layer", &PyNetwork<double>::add_plastic_layer)
.def("initiate_training", &PyNetwork<double>::initiate_training)
.def("train_layer", &PyNetwork<double>::train_layer)
.def("training_done", &PyNetwork<double>::training_done)
.def("evaluate", &PyNetwork<double>::evaluate);
}
|
abe5b1d9f5c522b0c778e134fe3f8de554dada4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels/cpu/dragon/bbox_utils.h"
#include "core/ieee754_float.h"
#include "kernels/common/third/dragon.h"
#ifdef TS_USE_CUDA_FP16
#include "kernels/gpu/cudax_fp16_math.h"
#endif
#include "kernels/gpu/gpu_kernel.h"
namespace ts {
namespace dragon {
namespace rcnn {
/******************** BBox ********************/
template<typename T>
__device__ int _BBoxTransform(
const T dx,
const T dy,
const T d_log_w,
const T d_log_h,
const T im_w,
const T im_h,
const T min_box_w,
const T min_box_h,
T *bbox) {
const T w = bbox[2] - bbox[0] + (T) 1;
const T h = bbox[3] - bbox[1] + (T) 1;
const T ctr_x = bbox[0] + (T) 0.5 * w;
const T ctr_y = bbox[1] + (T) 0.5 * h;
const T pred_ctr_x = dx * w + ctr_x;
const T pred_ctr_y = dy * h + ctr_y;
const T pred_w = exp(d_log_w) * w;
const T pred_h = exp(d_log_h) * h;
bbox[0] = pred_ctr_x - (T) 0.5 * pred_w;
bbox[1] = pred_ctr_y - (T) 0.5 * pred_h;
bbox[2] = pred_ctr_x + (T) 0.5 * pred_w;
bbox[3] = pred_ctr_y + (T) 0.5 * pred_h;
bbox[0] = max((T) 0, min(bbox[0], im_w - (T) 1));
bbox[1] = max((T) 0, min(bbox[1], im_h - (T) 1));
bbox[2] = max((T) 0, min(bbox[2], im_w - (T) 1));
bbox[3] = max((T) 0, min(bbox[3], im_h - (T) 1));
const T box_w = bbox[2] - bbox[0] + (T) 1;
const T box_h = bbox[3] - bbox[1] + (T) 1;
return (box_w >= min_box_w) * (box_h >= min_box_h);
}
/******************** Proposal ********************/
template<typename T>
__global__ void _GenerateProposals(
const int nthreads,
const int A,
const int feat_h,
const int feat_w,
const int stride,
const float im_h,
const float im_w,
const float min_box_h,
const float min_box_w,
const T *scores,
const T *bbox_deltas,
const T *anchors,
T *proposals) {
CUDA_1D_KERNEL_LOOP(idx, nthreads) {
const int h = idx / A / feat_w;
const int w = (idx / A) % feat_w;
const int a = idx % A;
const T x = w * stride;
const T y = h * stride;
const T *bbox_delta = bbox_deltas + h * feat_w + w;
const T *score = scores + h * feat_w + w;
const int K = feat_h * feat_w;
const T dx = bbox_delta[(a * 4 + 0) * K];
const T dy = bbox_delta[(a * 4 + 1) * K];
const T d_log_w = bbox_delta[(a * 4 + 2) * K];
const T d_log_h = bbox_delta[(a * 4 + 3) * K];
T *proposal = proposals + idx * 5;
proposal[0] = x + anchors[a * 4 + 0];
proposal[1] = y + anchors[a * 4 + 1];
proposal[2] = x + anchors[a * 4 + 2];
proposal[3] = y + anchors[a * 4 + 3];
proposal[4] = _BBoxTransform(
dx, dy, d_log_w, d_log_h,
im_w, im_h, min_box_w, min_box_h,
proposal) * score[a * K];
}
}
template<>
void GenerateProposals<float, CUDAContext>(
const int A,
const int feat_h,
const int feat_w,
const int stride,
const float im_h,
const float im_w,
const float min_box_h,
const float min_box_w,
const float *scores,
const float *bbox_deltas,
const float *anchors,
float *proposals,
CUDAContext *ctx) {
const auto num_proposals = A * feat_h * feat_w;
RUN_KERNEL_STREAM(_GenerateProposals<float>,
CUDA_BLOCKS(num_proposals), CUDA_THREADS,
0, ctx->cuda_stream(),
num_proposals, A, feat_h, feat_w, stride,
im_h, im_w, min_box_h, min_box_w,
scores, bbox_deltas, anchors, proposals);
}
template<typename T>
__global__ void _GenerateProposals_v2(
const int nthreads,
const float im_h,
const float im_w,
const float min_box_h,
const float min_box_w,
const T *scores,
const T *bbox_deltas,
T *proposals) {
CUDA_1D_KERNEL_LOOP(idx, nthreads) {
const float dx = bbox_deltas[idx];
const float dy = bbox_deltas[nthreads + idx];
const float d_log_w = bbox_deltas[2 * nthreads + idx];
const float d_log_h = bbox_deltas[3 * nthreads + idx];
T *proposal = proposals + idx * 5;
proposal[4] = _BBoxTransform(
dx, dy, d_log_w, d_log_h,
im_w, im_h, min_box_w, min_box_h,
proposal) * scores[idx];
}
}
template<>
void GenerateProposals_v2<float, CUDAContext>(
const int total_anchors,
const float im_h,
const float im_w,
const float min_box_h,
const float min_box_w,
const float *scores,
const float *bbox_deltas,
float *proposals,
CUDAContext *ctx) {
RUN_KERNEL_STREAM(_GenerateProposals_v2<float>,
CUDA_BLOCKS(total_anchors), CUDA_THREADS,
0, ctx->cuda_stream(),
total_anchors, im_h, im_w, min_box_h, min_box_w,
scores, bbox_deltas, proposals);
}
/******************** NMS ********************/
#define DIV_UP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define NMS_BLOCK_SIZE 64
template<typename T>
__device__ T iou(const T *A, const T *B) {
const T x1 = max(A[0], B[0]);
const T y1 = max(A[1], B[1]);
const T x2 = min(A[2], B[2]);
const T y2 = min(A[3], B[3]);
const T width = max((T) 0, x2 - x1 + 1);
const T height = max((T) 0, y2 - y1 + 1);
const T area = width * height;
const T A_area = (A[2] - A[0] + 1) * (A[3] - A[1] + 1);
const T B_area = (B[2] - B[0] + 1) * (B[3] - B[1] + 1);
return area / (A_area + B_area - area);
}
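// Each (blockIdx.x, blockIdx.y) tile compares a 64-box column block against a
// 64-box row block; thread `tid` builds a 64-bit mask whose bit `di` is set when
// box (i_start + di) overlaps box (j_start + tid) with IoU above nms_thresh.
// The masks are later walked on the host to greedily select kept boxes.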
template<typename T>
__global__ void nms_mask(
const int num_boxes,
const T nms_thresh,
const T *boxes,
uint64_t *mask) {
const int i_start = blockIdx.x * NMS_BLOCK_SIZE;
const int di_end = min(num_boxes - i_start, NMS_BLOCK_SIZE);
const int j_start = blockIdx.y * NMS_BLOCK_SIZE;
const int dj_end = min(num_boxes - j_start, NMS_BLOCK_SIZE);
const int num_blocks = DIV_UP(num_boxes, NMS_BLOCK_SIZE);
const int bid = blockIdx.x;
const int tid = threadIdx.x;
__shared__ T boxes_i[NMS_BLOCK_SIZE * 4];
if (tid < di_end) {
boxes_i[tid * 4 + 0] = boxes[(i_start + tid) * 5 + 0];
boxes_i[tid * 4 + 1] = boxes[(i_start + tid) * 5 + 1];
boxes_i[tid * 4 + 2] = boxes[(i_start + tid) * 5 + 2];
boxes_i[tid * 4 + 3] = boxes[(i_start + tid) * 5 + 3];
}
__syncthreads();
if (tid < dj_end) {
const T *const box_j = boxes + (j_start + tid) * 5;
unsigned long long mask_j = 0;
const int di_start = (i_start == j_start) ? (tid + 1) : 0;
for (int di = di_start; di < di_end; ++di)
if (iou(box_j, boxes_i + di * 4) > nms_thresh)
mask_j |= 1ULL << di;
mask[(j_start + tid) * num_blocks + bid] = mask_j;
}
}
template<typename T>
void _ApplyNMS(
const int num_boxes,
const int max_keeps,
const float thresh,
const T *boxes,
int *keep_indices,
int &num_keep,
CUDAContext *ctx) {
const int num_blocks = DIV_UP(num_boxes, NMS_BLOCK_SIZE);
const dim3 blocks(num_blocks, num_blocks);
size_t mask_nbytes = num_boxes * num_blocks * sizeof(uint64_t);
size_t boxes_nbytes = num_boxes * 5 * sizeof(T);
void *boxes_dev, *mask_dev;
CUDA_CHECK(hipMalloc(&boxes_dev, boxes_nbytes));
CUDA_CHECK(hipMalloc(&mask_dev, mask_nbytes));
CUDA_CHECK(hipMemcpy(boxes_dev, boxes,
boxes_nbytes, hipMemcpyHostToDevice));
RUN_KERNEL_STREAM(nms_mask<T>,
blocks, NMS_BLOCK_SIZE,
0, ctx->cuda_stream(), num_boxes,
thresh, (T *) boxes_dev, (uint64_t *) mask_dev);
CUDA_CHECK(hipPeekAtLastError());
std::vector<uint64_t> mask_host(num_boxes * num_blocks);
CUDA_CHECK(hipMemcpy(&mask_host[0], mask_dev,
mask_nbytes, hipMemcpyDeviceToHost));
std::vector<uint64_t> dead_bit(num_blocks);
std::memset(&dead_bit[0], 0, sizeof(uint64_t) * num_blocks);
int num_selected = 0;
for (int i = 0; i < num_boxes; ++i) {
const int nblock = i / NMS_BLOCK_SIZE;
const int inblock = i % NMS_BLOCK_SIZE;
if (!(dead_bit[nblock] & (1ULL << inblock))) {
keep_indices[num_selected++] = i;
uint64_t *mask_i = &mask_host[0] + i * num_blocks;
for (int j = nblock; j < num_blocks; ++j) dead_bit[j] |= mask_i[j];
if (num_selected == max_keeps) break;
}
}
num_keep = num_selected;
CUDA_CHECK(hipFree(mask_dev));
CUDA_CHECK(hipFree(boxes_dev));
}
template<>
void ApplyNMS<float, CUDAContext>(
const int num_boxes,
const int max_keeps,
const float thresh,
const float *boxes,
int *keep_indices,
int &num_keep,
CUDAContext *ctx) {
_ApplyNMS<float>(num_boxes, max_keeps, thresh,
boxes, keep_indices, num_keep, ctx);
}
} // namespace rcnn
} // namespace dragon
} // namespace ts
| abe5b1d9f5c522b0c778e134fe3f8de554dada4c.cu | #include "kernels/cpu/dragon/bbox_utils.h"
#include "core/ieee754_float.h"
#include "kernels/common/third/dragon.h"
#ifdef TS_USE_CUDA_FP16
#include "kernels/gpu/cudax_fp16_math.h"
#endif
#include "kernels/gpu/gpu_kernel.h"
namespace ts {
namespace dragon {
namespace rcnn {
/******************** BBox ********************/
template<typename T>
__device__ int _BBoxTransform(
const T dx,
const T dy,
const T d_log_w,
const T d_log_h,
const T im_w,
const T im_h,
const T min_box_w,
const T min_box_h,
T *bbox) {
const T w = bbox[2] - bbox[0] + (T) 1;
const T h = bbox[3] - bbox[1] + (T) 1;
const T ctr_x = bbox[0] + (T) 0.5 * w;
const T ctr_y = bbox[1] + (T) 0.5 * h;
const T pred_ctr_x = dx * w + ctr_x;
const T pred_ctr_y = dy * h + ctr_y;
const T pred_w = exp(d_log_w) * w;
const T pred_h = exp(d_log_h) * h;
bbox[0] = pred_ctr_x - (T) 0.5 * pred_w;
bbox[1] = pred_ctr_y - (T) 0.5 * pred_h;
bbox[2] = pred_ctr_x + (T) 0.5 * pred_w;
bbox[3] = pred_ctr_y + (T) 0.5 * pred_h;
bbox[0] = max((T) 0, min(bbox[0], im_w - (T) 1));
bbox[1] = max((T) 0, min(bbox[1], im_h - (T) 1));
bbox[2] = max((T) 0, min(bbox[2], im_w - (T) 1));
bbox[3] = max((T) 0, min(bbox[3], im_h - (T) 1));
const T box_w = bbox[2] - bbox[0] + (T) 1;
const T box_h = bbox[3] - bbox[1] + (T) 1;
return (box_w >= min_box_w) * (box_h >= min_box_h);
}
/******************** Proposal ********************/
template<typename T>
__global__ void _GenerateProposals(
const int nthreads,
const int A,
const int feat_h,
const int feat_w,
const int stride,
const float im_h,
const float im_w,
const float min_box_h,
const float min_box_w,
const T *scores,
const T *bbox_deltas,
const T *anchors,
T *proposals) {
CUDA_1D_KERNEL_LOOP(idx, nthreads) {
const int h = idx / A / feat_w;
const int w = (idx / A) % feat_w;
const int a = idx % A;
const T x = w * stride;
const T y = h * stride;
const T *bbox_delta = bbox_deltas + h * feat_w + w;
const T *score = scores + h * feat_w + w;
const int K = feat_h * feat_w;
const T dx = bbox_delta[(a * 4 + 0) * K];
const T dy = bbox_delta[(a * 4 + 1) * K];
const T d_log_w = bbox_delta[(a * 4 + 2) * K];
const T d_log_h = bbox_delta[(a * 4 + 3) * K];
T *proposal = proposals + idx * 5;
proposal[0] = x + anchors[a * 4 + 0];
proposal[1] = y + anchors[a * 4 + 1];
proposal[2] = x + anchors[a * 4 + 2];
proposal[3] = y + anchors[a * 4 + 3];
proposal[4] = _BBoxTransform(
dx, dy, d_log_w, d_log_h,
im_w, im_h, min_box_w, min_box_h,
proposal) * score[a * K];
}
}
template<>
void GenerateProposals<float, CUDAContext>(
const int A,
const int feat_h,
const int feat_w,
const int stride,
const float im_h,
const float im_w,
const float min_box_h,
const float min_box_w,
const float *scores,
const float *bbox_deltas,
const float *anchors,
float *proposals,
CUDAContext *ctx) {
const auto num_proposals = A * feat_h * feat_w;
RUN_KERNEL_STREAM(_GenerateProposals<float>,
CUDA_BLOCKS(num_proposals), CUDA_THREADS,
0, ctx->cuda_stream(),
num_proposals, A, feat_h, feat_w, stride,
im_h, im_w, min_box_h, min_box_w,
scores, bbox_deltas, anchors, proposals);
}
template<typename T>
__global__ void _GenerateProposals_v2(
const int nthreads,
const float im_h,
const float im_w,
const float min_box_h,
const float min_box_w,
const T *scores,
const T *bbox_deltas,
T *proposals) {
CUDA_1D_KERNEL_LOOP(idx, nthreads) {
const float dx = bbox_deltas[idx];
const float dy = bbox_deltas[nthreads + idx];
const float d_log_w = bbox_deltas[2 * nthreads + idx];
const float d_log_h = bbox_deltas[3 * nthreads + idx];
T *proposal = proposals + idx * 5;
proposal[4] = _BBoxTransform(
dx, dy, d_log_w, d_log_h,
im_w, im_h, min_box_w, min_box_h,
proposal) * scores[idx];
}
}
template<>
void GenerateProposals_v2<float, CUDAContext>(
const int total_anchors,
const float im_h,
const float im_w,
const float min_box_h,
const float min_box_w,
const float *scores,
const float *bbox_deltas,
float *proposals,
CUDAContext *ctx) {
RUN_KERNEL_STREAM(_GenerateProposals_v2<float>,
CUDA_BLOCKS(total_anchors), CUDA_THREADS,
0, ctx->cuda_stream(),
total_anchors, im_h, im_w, min_box_h, min_box_w,
scores, bbox_deltas, proposals);
}
/******************** NMS ********************/
#define DIV_UP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define NMS_BLOCK_SIZE 64
template<typename T>
__device__ T iou(const T *A, const T *B) {
const T x1 = max(A[0], B[0]);
const T y1 = max(A[1], B[1]);
const T x2 = min(A[2], B[2]);
const T y2 = min(A[3], B[3]);
const T width = max((T) 0, x2 - x1 + 1);
const T height = max((T) 0, y2 - y1 + 1);
const T area = width * height;
const T A_area = (A[2] - A[0] + 1) * (A[3] - A[1] + 1);
const T B_area = (B[2] - B[0] + 1) * (B[3] - B[1] + 1);
return area / (A_area + B_area - area);
}
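// Each (blockIdx.x, blockIdx.y) tile compares a 64-box column block against a
// 64-box row block; thread `tid` builds a 64-bit mask whose bit `di` is set when
// box (i_start + di) overlaps box (j_start + tid) with IoU above nms_thresh.
// The masks are later walked on the host to greedily select kept boxes.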
template<typename T>
__global__ void nms_mask(
const int num_boxes,
const T nms_thresh,
const T *boxes,
uint64_t *mask) {
const int i_start = blockIdx.x * NMS_BLOCK_SIZE;
const int di_end = min(num_boxes - i_start, NMS_BLOCK_SIZE);
const int j_start = blockIdx.y * NMS_BLOCK_SIZE;
const int dj_end = min(num_boxes - j_start, NMS_BLOCK_SIZE);
const int num_blocks = DIV_UP(num_boxes, NMS_BLOCK_SIZE);
const int bid = blockIdx.x;
const int tid = threadIdx.x;
__shared__ T boxes_i[NMS_BLOCK_SIZE * 4];
if (tid < di_end) {
boxes_i[tid * 4 + 0] = boxes[(i_start + tid) * 5 + 0];
boxes_i[tid * 4 + 1] = boxes[(i_start + tid) * 5 + 1];
boxes_i[tid * 4 + 2] = boxes[(i_start + tid) * 5 + 2];
boxes_i[tid * 4 + 3] = boxes[(i_start + tid) * 5 + 3];
}
__syncthreads();
if (tid < dj_end) {
const T *const box_j = boxes + (j_start + tid) * 5;
unsigned long long mask_j = 0;
const int di_start = (i_start == j_start) ? (tid + 1) : 0;
for (int di = di_start; di < di_end; ++di)
if (iou(box_j, boxes_i + di * 4) > nms_thresh)
mask_j |= 1ULL << di;
mask[(j_start + tid) * num_blocks + bid] = mask_j;
}
}
template<typename T>
void _ApplyNMS(
const int num_boxes,
const int max_keeps,
const float thresh,
const T *boxes,
int *keep_indices,
int &num_keep,
CUDAContext *ctx) {
const int num_blocks = DIV_UP(num_boxes, NMS_BLOCK_SIZE);
const dim3 blocks(num_blocks, num_blocks);
size_t mask_nbytes = num_boxes * num_blocks * sizeof(uint64_t);
size_t boxes_nbytes = num_boxes * 5 * sizeof(T);
void *boxes_dev, *mask_dev;
CUDA_CHECK(cudaMalloc(&boxes_dev, boxes_nbytes));
CUDA_CHECK(cudaMalloc(&mask_dev, mask_nbytes));
CUDA_CHECK(cudaMemcpy(boxes_dev, boxes,
boxes_nbytes, cudaMemcpyHostToDevice));
RUN_KERNEL_STREAM(nms_mask<T>,
blocks, NMS_BLOCK_SIZE,
0, ctx->cuda_stream(), num_boxes,
thresh, (T *) boxes_dev, (uint64_t *) mask_dev);
CUDA_CHECK(cudaPeekAtLastError());
std::vector<uint64_t> mask_host(num_boxes * num_blocks);
CUDA_CHECK(cudaMemcpy(&mask_host[0], mask_dev,
mask_nbytes, cudaMemcpyDeviceToHost));
std::vector<uint64_t> dead_bit(num_blocks);
std::memset(&dead_bit[0], 0, sizeof(uint64_t) * num_blocks);
int num_selected = 0;
for (int i = 0; i < num_boxes; ++i) {
const int nblock = i / NMS_BLOCK_SIZE;
const int inblock = i % NMS_BLOCK_SIZE;
if (!(dead_bit[nblock] & (1ULL << inblock))) {
keep_indices[num_selected++] = i;
uint64_t *mask_i = &mask_host[0] + i * num_blocks;
for (int j = nblock; j < num_blocks; ++j) dead_bit[j] |= mask_i[j];
if (num_selected == max_keeps) break;
}
}
num_keep = num_selected;
CUDA_CHECK(cudaFree(mask_dev));
CUDA_CHECK(cudaFree(boxes_dev));
}
template<>
void ApplyNMS<float, CUDAContext>(
const int num_boxes,
const int max_keeps,
const float thresh,
const float *boxes,
int *keep_indices,
int &num_keep,
CUDAContext *ctx) {
_ApplyNMS<float>(num_boxes, max_keeps, thresh,
boxes, keep_indices, num_keep, ctx);
}
} // namespace rcnn
} // namespace dragon
} // namespace ts
|
3decdc8006fb41ccc6ae26f41d1ad3343edd558e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Raytracer.h"
__device__ Intersection GetIntersection(SceneData data, Ray ray, float minInt, float maxInt) {
Intersection closestInt;
glm::vec3 d_model;
glm::vec3 p0_model;
closestInt.T = maxInt;
closestInt.ClosestShape = -1;
for (int i = 0; i < data.NumShapes; i++) {
d_model = glm::vec3(data.Shapes[i].InverseTransform * glm::vec4(ray.D, 0));
p0_model = glm::vec3(data.Shapes[i].InverseTransform * glm::vec4(ray.P0, 1));
if (data.Shapes[i].Type == Shape::SPHERE) {
float A = glm::dot(d_model, d_model);
float B = 2.0f * glm::dot(d_model, (p0_model - data.Shapes[i].Position));
float C = glm::dot((p0_model - data.Shapes[i].Position), (p0_model - data.Shapes[i].Position))
- data.Shapes[i].Radius * data.Shapes[i].Radius;
float descriminant = B * B - 4 * A * C;
float t;
if (descriminant < 0) {
continue;
} else if (descriminant == 0) {
t = -B / (2.0f * A);
if (t < closestInt.T && t > minInt && t < maxInt) {
closestInt.T = t;
closestInt.ClosestShape = i;
closestInt.SurfaceNormal = glm::normalize(glm::vec3(glm::inverseTranspose(data.Shapes[i].Transform) * glm::vec4((p0_model + t * d_model) - data.Shapes[i].Position, 0)));
}
} else {
float root = std::sqrt(descriminant);
t = min(((-B + root) / (2.0f * A)), ((-B - root) / (2.0f * A)));
if (t < closestInt.T && t > minInt && t < maxInt) {
closestInt.T = t;
closestInt.ClosestShape = i;
closestInt.SurfaceNormal = glm::normalize(glm::vec3(glm::inverseTranspose(data.Shapes[i].Transform) * glm::vec4((p0_model + t * d_model) - data.Shapes[i].Position, 0)));
}
}
} else if (data.Shapes[i].Type == Shape::PLANE) {
glm::vec3 P = glm::normalize(data.Shapes[i].Normal) * data.Shapes[i].Distance;
float denom = glm::dot(d_model, data.Shapes[i].Normal);
float t = glm::dot((P - p0_model), data.Shapes[i].Normal) / denom;
if (abs(denom) > 0.001f && t < closestInt.T && t > minInt && t < maxInt) {
closestInt.T = t;
closestInt.ClosestShape = i;
closestInt.SurfaceNormal = glm::normalize(glm::vec3(glm::inverseTranspose(data.Shapes[i].Transform) * glm::vec4(data.Shapes[i].Normal, 0)));
}
}
}
return closestInt;
}
__device__ glm::vec3 GetLightingAtIntersection(SceneData data, Intersection inter, Ray ray) {
glm::vec3 intersectionColor(0);
Shape curShape = data.Shapes[inter.ClosestShape];
Intersection lightInt;
glm::vec3 intersectionPoint = ray.P0 + inter.T * ray.D;
glm::vec3 lightPosition_model;
Ray lightRay;
lightRay.P0 = intersectionPoint;
intersectionColor = curShape.Pig.Color * curShape.Fin.Ambient;
for (int i = 0; i < data.NumLights; i++) {
lightRay.D = glm::normalize(data.Lights[i].Position - intersectionPoint);
lightInt = GetIntersection(data, lightRay, 0.01f,
glm::length(data.Lights[i].Position - intersectionPoint));
if (lightInt.ClosestShape < 0) {
glm::vec3 reflection = glm::normalize(-lightRay.D + 2.0f * (max(glm::dot(lightRay.D, inter.SurfaceNormal), 0.0f)) * inter.SurfaceNormal);
intersectionColor += curShape.Pig.Color * max(glm::dot(inter.SurfaceNormal, lightRay.D), 0.0f) * data.Lights[i].Color * curShape.Fin.Diffuse;
if (data.ShadingType == Raytracer::PHONG) {
intersectionColor += curShape.Pig.Color * pow(max(glm::dot(-ray.D, reflection), 0.0f), 1.0f / curShape.Fin.Roughness) * data.Lights[i].Color * curShape.Fin.Specular;
} else if (data.ShadingType == Raytracer::GAUSSIAN) {
glm::vec3 rayReflect = glm::normalize(- 2.0f * glm::dot(ray.D, inter.SurfaceNormal) * inter.SurfaceNormal + ray.D);
float exponent = acos(glm::dot(rayReflect, lightRay.D)) / curShape.Fin.Roughness;
exponent = -(exponent * exponent);
intersectionColor += pow(2.71828f, exponent) * curShape.Fin.Specular * data.Lights[i].Color;
}
}
}
intersectionColor.x = min(intersectionColor.x, 1.0f);
intersectionColor.y = min(intersectionColor.y, 1.0f);
intersectionColor.z = min(intersectionColor.z, 1.0f);
return intersectionColor;
}
__global__ void CUDATrace(SceneData data, color_t *scenePixels, int N) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
int sceneIx = i * data.Height + j;
if (i * blockDim.x + j > N) return;
scenePixels[sceneIx].r = 0;
scenePixels[sceneIx].g = 0;
scenePixels[sceneIx].b = 0;
scenePixels[sceneIx].f = 0;
float Us = data.Params.Left + (data.Params.Right - data.Params.Left) * ((i + 0.5f) / ((float) data.Width));
float Vs = data.Params.Bottom + (data.Params.Top - data.Params.Bottom) * ((j + 0.5f) / ((float) data.Height));
glm::vec3 sPrime = data.Cam.Location + Us * data.Params.U + Vs * data.Params.V + -1.0f * data.Params.W;
Ray castRay;
glm::vec3 pixelColor(0);
castRay.D = glm::normalize(sPrime - data.Cam.Location);
castRay.P0 = data.Cam.Location;
Intersection closestInt = GetIntersection(data, castRay, FLT_MIN, FLT_MAX);
glm::vec3 bounceColors[NUM_REFLECTIONS];
float shapeReflection[NUM_REFLECTIONS] = {0.0f};
for (int i = 0; i < NUM_REFLECTIONS && closestInt.ClosestShape >= 0; i++) {
shapeReflection[i] = data.Shapes[closestInt.ClosestShape].Fin.Reflection;
bounceColors[i] = GetLightingAtIntersection(data, closestInt, castRay);
castRay.P0 = castRay.P0 + closestInt.T * castRay.D;
castRay.D = glm::normalize(- 2.0f * glm::dot(castRay.D, closestInt.SurfaceNormal) * closestInt.SurfaceNormal + castRay.D);
closestInt = GetIntersection(data, castRay, 0.01f, FLT_MAX);
}
for (int i = NUM_REFLECTIONS - 1; i >= 0; i--) {
pixelColor = (1.0f - shapeReflection[i]) * bounceColors[i] + shapeReflection[i] * pixelColor;
}
pixelColor.x = min(pixelColor.x, 1.0f);
pixelColor.y = min(pixelColor.y, 1.0f);
pixelColor.z = min(pixelColor.z, 1.0f);
scenePixels[sceneIx].r = pixelColor.x;
scenePixels[sceneIx].g = pixelColor.y;
scenePixels[sceneIx].b = pixelColor.z;
scenePixels[sceneIx].f = 0;
}
Raytracer::Raytracer(int width, int height, int shadingType, std::vector<std::string> rawComponents) {
Data.Width = width;
Data.Height = height;
Data.ShadingType = shadingType;
ParseRawComponents(rawComponents);
}
Image* Raytracer::TraceScene() {
TracedScene = new Image(Data.Width, Data.Height);
Data.Params.U = glm::normalize(glm::vec3(Data.Cam.Right.x, Data.Cam.Right.y, Data.Cam.Right.z));
Data.Params.V = glm::normalize(glm::vec3(Data.Cam.Up.x, Data.Cam.Up.y, Data.Cam.Up.z));
Data.Params.W = glm::normalize(glm::cross(Data.Params.U, Data.Params.V));
Data.Params.Left = -1 * glm::length(Data.Cam.Right) / 2.0;
Data.Params.Right = -Data.Params.Left;
Data.Params.Top = glm::length(Data.Cam.Up) / 2.0;
Data.Params.Bottom = -Data.Params.Top;
SetupAndLaunchCUDA();
return TracedScene;
}
void Raytracer::SetupAndLaunchCUDA() {
dim3 threadsPerBlock(8,8);
dim3 numBlocks(Data.Width / threadsPerBlock.x,
Data.Height / threadsPerBlock.y);
SceneData cudaData_d = Data;
color_t *scenePixels_h = new color_t[Data.Width * Data.Height];
color_t *scenePixels_d;
LightSource *lights_d;
Shape *shapes_d;
HandleCUDAError(hipMalloc((void **) &lights_d, Data.NumLights * sizeof(LightSource)));
HandleCUDAError(hipMalloc((void **) &shapes_d, Data.NumShapes * sizeof(Shape)));
HandleCUDAError(hipMemcpy(lights_d, Data.Lights, Data.NumLights * sizeof(LightSource), hipMemcpyHostToDevice));
HandleCUDAError(hipMemcpy(shapes_d, Data.Shapes, Data.NumShapes * sizeof(Shape), hipMemcpyHostToDevice));
HandleCUDAError(hipMalloc((void **) &scenePixels_d, Data.Width * Data.Height * sizeof(color_t)));
cudaData_d.Lights = lights_d;
cudaData_d.Shapes = shapes_d;
hipLaunchKernelGGL(( CUDATrace) , dim3(numBlocks), dim3(threadsPerBlock), 0, 0, cudaData_d, scenePixels_d, Data.Width * Data.Height);
HandleCUDAError(hipMemcpy(scenePixels_h, scenePixels_d, Data.Width * Data.Height * sizeof(color_t), hipMemcpyDeviceToHost));
for (int i = 0; i < Data.Width; i++) {
for (int j = 0; j < Data.Height; j++) {
TracedScene->pixel(i, j, scenePixels_h[i * Data.Height + j]);
}
}
HandleCUDAError(hipFree(lights_d));
HandleCUDAError(hipFree(shapes_d));
HandleCUDAError(hipFree(scenePixels_d));
}
void Raytracer::ParseRawComponents(std::vector<std::string> components) {
for (int i = 0; i < components.size(); i++) {
std::string curComp = components[i];
if (std::string::npos != curComp.find("camera")) {
Data.Cam = *(new Camera(curComp));
} else if (std::string::npos != curComp.find("light_source")) {
Lights.push_back(new LightSource(curComp));
} else if (std::string::npos != curComp.find("sphere")) {
Shapes.push_back(new Shape(curComp, Shape::SPHERE));
} else if (std::string::npos != curComp.find("plane")) {
Shape *curShape = new Shape(curComp, Shape::PLANE);
Shapes.push_back(curShape);
//printf("Normal: %f, %f, %f Distance: %f\n", curShape->Normal.x, curShape->Normal.y, curShape->Normal.z, curShape->Distance);
}
}
Data.Lights = new LightSource[Lights.size()];
Data.NumLights = Lights.size();
for (int i = 0; i < Lights.size(); i++) {
Data.Lights[i] = *Lights[i];
}
Data.Shapes = new Shape[Shapes.size()];
Data.NumShapes = Shapes.size();
for (int i = 0; i < Shapes.size(); i++) {
Data.Shapes[i] = *Shapes[i];
}
}
void Raytracer::HandleCUDAError(hipError_t error) {
if (error != hipSuccess) {
printf("CUDA Problem: %s\n", hipGetErrorString(error));
exit(-1);
}
}
| 3decdc8006fb41ccc6ae26f41d1ad3343edd558e.cu | #include "Raytracer.h"
__device__ Intersection GetIntersection(SceneData data, Ray ray, float minInt, float maxInt) {
Intersection closestInt;
glm::vec3 d_model;
glm::vec3 p0_model;
closestInt.T = maxInt;
closestInt.ClosestShape = -1;
for (int i = 0; i < data.NumShapes; i++) {
d_model = glm::vec3(data.Shapes[i].InverseTransform * glm::vec4(ray.D, 0));
p0_model = glm::vec3(data.Shapes[i].InverseTransform * glm::vec4(ray.P0, 1));
if (data.Shapes[i].Type == Shape::SPHERE) {
float A = glm::dot(d_model, d_model);
float B = 2.0f * glm::dot(d_model, (p0_model - data.Shapes[i].Position));
float C = glm::dot((p0_model - data.Shapes[i].Position), (p0_model - data.Shapes[i].Position))
- data.Shapes[i].Radius * data.Shapes[i].Radius;
float descriminant = B * B - 4 * A * C;
float t;
if (descriminant < 0) {
continue;
} else if (descriminant == 0) {
t = -B / (2.0f * A);
if (t < closestInt.T && t > minInt && t < maxInt) {
closestInt.T = t;
closestInt.ClosestShape = i;
closestInt.SurfaceNormal = glm::normalize(glm::vec3(glm::inverseTranspose(data.Shapes[i].Transform) * glm::vec4((p0_model + t * d_model) - data.Shapes[i].Position, 0)));
}
} else {
float root = std::sqrt(descriminant);
t = min(((-B + root) / (2.0f * A)), ((-B - root) / (2.0f * A)));
if (t < closestInt.T && t > minInt && t < maxInt) {
closestInt.T = t;
closestInt.ClosestShape = i;
closestInt.SurfaceNormal = glm::normalize(glm::vec3(glm::inverseTranspose(data.Shapes[i].Transform) * glm::vec4((p0_model + t * d_model) - data.Shapes[i].Position, 0)));
}
}
} else if (data.Shapes[i].Type == Shape::PLANE) {
glm::vec3 P = glm::normalize(data.Shapes[i].Normal) * data.Shapes[i].Distance;
float denom = glm::dot(d_model, data.Shapes[i].Normal);
float t = glm::dot((P - p0_model), data.Shapes[i].Normal) / denom;
if (abs(denom) > 0.001f && t < closestInt.T && t > minInt && t < maxInt) {
closestInt.T = t;
closestInt.ClosestShape = i;
closestInt.SurfaceNormal = glm::normalize(glm::vec3(glm::inverseTranspose(data.Shapes[i].Transform) * glm::vec4(data.Shapes[i].Normal, 0)));
}
}
}
return closestInt;
}
__device__ glm::vec3 GetLightingAtIntersection(SceneData data, Intersection inter, Ray ray) {
glm::vec3 intersectionColor(0);
Shape curShape = data.Shapes[inter.ClosestShape];
Intersection lightInt;
glm::vec3 intersectionPoint = ray.P0 + inter.T * ray.D;
glm::vec3 lightPosition_model;
Ray lightRay;
lightRay.P0 = intersectionPoint;
intersectionColor = curShape.Pig.Color * curShape.Fin.Ambient;
for (int i = 0; i < data.NumLights; i++) {
lightRay.D = glm::normalize(data.Lights[i].Position - intersectionPoint);
lightInt = GetIntersection(data, lightRay, 0.01f,
glm::length(data.Lights[i].Position - intersectionPoint));
if (lightInt.ClosestShape < 0) {
glm::vec3 reflection = glm::normalize(-lightRay.D + 2.0f * (max(glm::dot(lightRay.D, inter.SurfaceNormal), 0.0f)) * inter.SurfaceNormal);
intersectionColor += curShape.Pig.Color * max(glm::dot(inter.SurfaceNormal, lightRay.D), 0.0f) * data.Lights[i].Color * curShape.Fin.Diffuse;
if (data.ShadingType == Raytracer::PHONG) {
intersectionColor += curShape.Pig.Color * pow(max(glm::dot(-ray.D, reflection), 0.0f), 1.0f / curShape.Fin.Roughness) * data.Lights[i].Color * curShape.Fin.Specular;
} else if (data.ShadingType == Raytracer::GAUSSIAN) {
glm::vec3 rayReflect = glm::normalize(- 2.0f * glm::dot(ray.D, inter.SurfaceNormal) * inter.SurfaceNormal + ray.D);
float exponent = acos(glm::dot(rayReflect, lightRay.D)) / curShape.Fin.Roughness;
exponent = -(exponent * exponent);
intersectionColor += pow(2.71828f, exponent) * curShape.Fin.Specular * data.Lights[i].Color;
}
}
}
intersectionColor.x = min(intersectionColor.x, 1.0f);
intersectionColor.y = min(intersectionColor.y, 1.0f);
intersectionColor.z = min(intersectionColor.z, 1.0f);
return intersectionColor;
}
__global__ void CUDATrace(SceneData data, color_t *scenePixels, int N) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
int sceneIx = i * data.Height + j;
if (i * blockDim.x + j > N) return;
scenePixels[sceneIx].r = 0;
scenePixels[sceneIx].g = 0;
scenePixels[sceneIx].b = 0;
scenePixels[sceneIx].f = 0;
float Us = data.Params.Left + (data.Params.Right - data.Params.Left) * ((i + 0.5f) / ((float) data.Width));
float Vs = data.Params.Bottom + (data.Params.Top - data.Params.Bottom) * ((j + 0.5f) / ((float) data.Height));
glm::vec3 sPrime = data.Cam.Location + Us * data.Params.U + Vs * data.Params.V + -1.0f * data.Params.W;
Ray castRay;
glm::vec3 pixelColor(0);
castRay.D = glm::normalize(sPrime - data.Cam.Location);
castRay.P0 = data.Cam.Location;
Intersection closestInt = GetIntersection(data, castRay, FLT_MIN, FLT_MAX);
glm::vec3 bounceColors[NUM_REFLECTIONS];
float shapeReflection[NUM_REFLECTIONS] = {0.0f};
for (int i = 0; i < NUM_REFLECTIONS && closestInt.ClosestShape >= 0; i++) {
shapeReflection[i] = data.Shapes[closestInt.ClosestShape].Fin.Reflection;
bounceColors[i] = GetLightingAtIntersection(data, closestInt, castRay);
castRay.P0 = castRay.P0 + closestInt.T * castRay.D;
castRay.D = glm::normalize(- 2.0f * glm::dot(castRay.D, closestInt.SurfaceNormal) * closestInt.SurfaceNormal + castRay.D);
closestInt = GetIntersection(data, castRay, 0.01f, FLT_MAX);
}
for (int i = NUM_REFLECTIONS - 1; i >= 0; i--) {
pixelColor = (1.0f - shapeReflection[i]) * bounceColors[i] + shapeReflection[i] * pixelColor;
}
pixelColor.x = min(pixelColor.x, 1.0f);
pixelColor.y = min(pixelColor.y, 1.0f);
pixelColor.z = min(pixelColor.z, 1.0f);
scenePixels[sceneIx].r = pixelColor.x;
scenePixels[sceneIx].g = pixelColor.y;
scenePixels[sceneIx].b = pixelColor.z;
scenePixels[sceneIx].f = 0;
}
Raytracer::Raytracer(int width, int height, int shadingType, std::vector<std::string> rawComponents) {
Data.Width = width;
Data.Height = height;
Data.ShadingType = shadingType;
ParseRawComponents(rawComponents);
}
Image* Raytracer::TraceScene() {
TracedScene = new Image(Data.Width, Data.Height);
Data.Params.U = glm::normalize(glm::vec3(Data.Cam.Right.x, Data.Cam.Right.y, Data.Cam.Right.z));
Data.Params.V = glm::normalize(glm::vec3(Data.Cam.Up.x, Data.Cam.Up.y, Data.Cam.Up.z));
Data.Params.W = glm::normalize(glm::cross(Data.Params.U, Data.Params.V));
Data.Params.Left = -1 * glm::length(Data.Cam.Right) / 2.0;
Data.Params.Right = -Data.Params.Left;
Data.Params.Top = glm::length(Data.Cam.Up) / 2.0;
Data.Params.Bottom = -Data.Params.Top;
SetupAndLaunchCUDA();
return TracedScene;
}
void Raytracer::SetupAndLaunchCUDA() {
dim3 threadsPerBlock(8,8);
dim3 numBlocks(Data.Width / threadsPerBlock.x,
Data.Height / threadsPerBlock.y);
SceneData cudaData_d = Data;
color_t *scenePixels_h = new color_t[Data.Width * Data.Height];
color_t *scenePixels_d;
LightSource *lights_d;
Shape *shapes_d;
HandleCUDAError(cudaMalloc((void **) &lights_d, Data.NumLights * sizeof(LightSource)));
HandleCUDAError(cudaMalloc((void **) &shapes_d, Data.NumShapes * sizeof(Shape)));
HandleCUDAError(cudaMemcpy(lights_d, Data.Lights, Data.NumLights * sizeof(LightSource), cudaMemcpyHostToDevice));
HandleCUDAError(cudaMemcpy(shapes_d, Data.Shapes, Data.NumShapes * sizeof(Shape), cudaMemcpyHostToDevice));
HandleCUDAError(cudaMalloc((void **) &scenePixels_d, Data.Width * Data.Height * sizeof(color_t)));
cudaData_d.Lights = lights_d;
cudaData_d.Shapes = shapes_d;
CUDATrace <<<numBlocks, threadsPerBlock>>> (cudaData_d, scenePixels_d, Data.Width * Data.Height);
HandleCUDAError(cudaMemcpy(scenePixels_h, scenePixels_d, Data.Width * Data.Height * sizeof(color_t), cudaMemcpyDeviceToHost));
for (int i = 0; i < Data.Width; i++) {
for (int j = 0; j < Data.Height; j++) {
TracedScene->pixel(i, j, scenePixels_h[i * Data.Height + j]);
}
}
HandleCUDAError(cudaFree(lights_d));
HandleCUDAError(cudaFree(shapes_d));
HandleCUDAError(cudaFree(scenePixels_d));
}
void Raytracer::ParseRawComponents(std::vector<std::string> components) {
for (int i = 0; i < components.size(); i++) {
std::string curComp = components[i];
if (std::string::npos != curComp.find("camera")) {
Data.Cam = *(new Camera(curComp));
} else if (std::string::npos != curComp.find("light_source")) {
Lights.push_back(new LightSource(curComp));
} else if (std::string::npos != curComp.find("sphere")) {
Shapes.push_back(new Shape(curComp, Shape::SPHERE));
} else if (std::string::npos != curComp.find("plane")) {
Shape *curShape = new Shape(curComp, Shape::PLANE);
Shapes.push_back(curShape);
//printf("Normal: %f, %f, %f Distance: %f\n", curShape->Normal.x, curShape->Normal.y, curShape->Normal.z, curShape->Distance);
}
}
Data.Lights = new LightSource[Lights.size()];
Data.NumLights = Lights.size();
for (int i = 0; i < Lights.size(); i++) {
Data.Lights[i] = *Lights[i];
}
Data.Shapes = new Shape[Shapes.size()];
Data.NumShapes = Shapes.size();
for (int i = 0; i < Shapes.size(); i++) {
Data.Shapes[i] = *Shapes[i];
}
}
void Raytracer::HandleCUDAError(cudaError_t error) {
if (error != cudaSuccess) {
printf("CUDA Problem: %s\n", cudaGetErrorString(error));
exit(-1);
}
}
|
19c45426787454859236ced02012062c44271e61.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Useful functions for the PVM that are specific to the tracker
// derivative and error calculation
__global__ void der_and_error_kernel(double *A, double *B, double *C,
unsigned int L)
{
unsigned int stride = blockDim.x * gridDim.x;
unsigned int start = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i = start; i < L; i += stride)
{
C[i] = 0.5 * (1. + A[i] - B[i]);
}
}
// integral calculation
__global__ void integral_kernel(double *A, double *B, double *C,
unsigned int L, double tau = 0.5)
{
unsigned int stride = blockDim.x * gridDim.x;
unsigned int start = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i = start; i < L; i += stride)
{
// this is the same as tau * A[i] + (1 - tau) * B[i]
C[i] = tau * (A[i] - B[i]) + B[i];
}
}
// appending the hidden values passed to upper layers as inputs to
// the end of the inputs (you need to do this to use the integral_kernel
// and the der_and_error_kernel)
__global__ void hid_append_kernel(double *inputs, double *hidden,
double *concat_arr, unsigned int *map,
unsigned int L_inputs, unsigned int L_concat_arr)
{
unsigned int stride = blockDim.x * gridDim.x;
unsigned int start = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i = start; i < L_concat_arr; i += stride)
{
if (i < L_inputs)
{
concat_arr[i] = inputs[i];
}
else
{
concat_arr[i] = hidden[map[i-L_inputs]];
}
}
}
// Average pooling of heatmaps from each layer
__global__ void tracker_avg_pool_kernel(double *heat_maps,
double *avg_heat_map, unsigned int L_avg, unsigned int N_layers)
{
unsigned int stride = blockDim.x * gridDim.x;
unsigned int start = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned int L_heat_maps = N_layers * L_avg;
for (unsigned int i = start; i < L_avg; i += stride)
{
double sum = 0.;
for (unsigned int j = i; j < L_heat_maps; j += L_avg)
{
sum += heat_maps[j];
}
avg_heat_map[i] = sum / N_layers;
}
}
__global__ void full_input_map_kernel(double *full_input,
double *sub_array, const unsigned int *map, unsigned int L_sub)
{
unsigned int stride = blockDim.x * gridDim.x;
unsigned int start = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i = start; i < L_sub; i += stride)
{
full_input[map[i]] = sub_array[i];
}
}
__global__ void hidden_map_to_full_input_kernel(double *full_input,
const double *hidden, const int *map, unsigned int L_full)
{
unsigned int stride = blockDim.x * gridDim.x;
unsigned int start = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i = start; i < L_full; i += stride)
{
int idx = map[i];
if (idx != -1)
{
full_input[i] = hidden[idx];
}
}
}
__global__ void gradient_inv_hid_map_kernel(double *grad_wrt_hid,
double *grad_wrt_full_input,
const unsigned int *map,
const unsigned int L_full)
{
unsigned int stride = blockDim.x * gridDim.x;
unsigned int start = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i = start; i < L_full; i += stride)
{
int idx = map[i];
if (idx != -1)
{
atomicAdd(&grad_wrt_hid[idx], grad_wrt_full_input[i]);
}
}
}
__global__ void output_pred_map_kernel(double *sub_array,
double *out_and_pred, const unsigned int *map, unsigned int L_sub)
{
unsigned int stride = blockDim.x * gridDim.x;
unsigned int start = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i = start; i < L_sub; i += stride)
{
sub_array[i] = out_and_pred[map[i]];
}
}
__global__ void rev_output_pred_map_kernel(double *sub_array,
double *out_and_pred, const unsigned int *map, unsigned int L_sub)
{
unsigned int stride = blockDim.x * gridDim.x;
unsigned int start = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i = start; i < L_sub; i += stride)
{
out_and_pred[map[i]] = sub_array[i];
}
}
__global__ void SquareErrorDerTrackerKernel(double *avg_heatmap,
double *gt_heatmap, double *delta_tracker,
unsigned int L_avg, unsigned int N_layers)
{
const unsigned int L_heat_maps = N_layers * L_avg;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
i < L_avg;
i += stride)
{
double tmp = (avg_heatmap[i] - gt_heatmap[i]) / N_layers;
for (unsigned int j = i; j < L_heat_maps; j += L_avg)
{
delta_tracker[j] = tmp;
}
}
}
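// Sliding-window sum: for every top-left placement of a width x height window
// over the A_width x A_height, n_color-channel image A, write the sum of all
// channel values inside the window to summed_Arr (laid out N_s_col x N_s_row).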
__global__ void PatchedSumImageKernel(double *A,
double *summed_Arr,
uint A_width,
uint A_height,
uint n_color,
uint width,
uint height)
{
uint N_s_row = A_height - height + 1;
uint N_s_col = A_width - width + 1;
uint stride_x = blockDim.x * gridDim.x;
uint stride_y = blockDim.y * gridDim.y;
for (uint i = threadIdx.x + blockIdx.x * blockDim.x;
i < N_s_col;
i += stride_x)
{
for (uint j = threadIdx.y + blockIdx.y * blockDim.y;
j < N_s_row;
j += stride_y)
{
double sum = 0.;
for (uint k = 0; k < width; k++)
{
for (uint l = 0; l < height; l++)
{
for (uint m = 0; m < n_color; m++)
sum += A[(i + k) * n_color + (j + l) * n_color * A_width + m];
}
}
summed_Arr[i + j * N_s_col] = sum;
}
}
}
| 19c45426787454859236ced02012062c44271e61.cu | // Useful functions for the PVM that are specific to the tracker
// derivative and error calculation
__global__ void der_and_error_kernel(double *A, double *B, double *C,
unsigned int L)
{
unsigned int stride = blockDim.x * gridDim.x;
unsigned int start = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i = start; i < L; i += stride)
{
C[i] = 0.5 * (1. + A[i] - B[i]);
}
}
// integral calculation
__global__ void integral_kernel(double *A, double *B, double *C,
unsigned int L, double tau = 0.5)
{
unsigned int stride = blockDim.x * gridDim.x;
unsigned int start = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i = start; i < L; i += stride)
{
// this is the same as tau * A[i] + (1 - tau) * B[i]
C[i] = tau * (A[i] - B[i]) + B[i];
}
}
// appending the hidden values passed to upper layers as inputs to
// the end of the inputs (you need to do this to use the integral_kernel
// and the der_and_error_kernel)
__global__ void hid_append_kernel(double *inputs, double *hidden,
double *concat_arr, unsigned int *map,
unsigned int L_inputs, unsigned int L_concat_arr)
{
unsigned int stride = blockDim.x * gridDim.x;
unsigned int start = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i = start; i < L_concat_arr; i += stride)
{
if (i < L_inputs)
{
concat_arr[i] = inputs[i];
}
else
{
concat_arr[i] = hidden[map[i-L_inputs]];
}
}
}
// Average pooling of heatmaps from each layer
__global__ void tracker_avg_pool_kernel(double *heat_maps,
double *avg_heat_map, unsigned int L_avg, unsigned int N_layers)
{
unsigned int stride = blockDim.x * gridDim.x;
unsigned int start = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned int L_heat_maps = N_layers * L_avg;
for (unsigned int i = start; i < L_avg; i += stride)
{
double sum = 0.;
for (unsigned int j = i; j < L_heat_maps; j += L_avg)
{
sum += heat_maps[j];
}
avg_heat_map[i] = sum / N_layers;
}
}
__global__ void full_input_map_kernel(double *full_input,
double *sub_array, const unsigned int *map, unsigned int L_sub)
{
unsigned int stride = blockDim.x * gridDim.x;
unsigned int start = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i = start; i < L_sub; i += stride)
{
full_input[map[i]] = sub_array[i];
}
}
__global__ void hidden_map_to_full_input_kernel(double *full_input,
const double *hidden, const int *map, unsigned int L_full)
{
unsigned int stride = blockDim.x * gridDim.x;
unsigned int start = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i = start; i < L_full; i += stride)
{
int idx = map[i];
if (idx != -1)
{
full_input[i] = hidden[idx];
}
}
}
__global__ void gradient_inv_hid_map_kernel(double *grad_wrt_hid,
double *grad_wrt_full_input,
const unsigned int *map,
const unsigned int L_full)
{
unsigned int stride = blockDim.x * gridDim.x;
unsigned int start = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i = start; i < L_full; i += stride)
{
int idx = map[i];
if (idx != -1)
{
atomicAdd(&grad_wrt_hid[idx], grad_wrt_full_input[i]);
}
}
}
__global__ void output_pred_map_kernel(double *sub_array,
double *out_and_pred, const unsigned int *map, unsigned int L_sub)
{
unsigned int stride = blockDim.x * gridDim.x;
unsigned int start = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i = start; i < L_sub; i += stride)
{
sub_array[i] = out_and_pred[map[i]];
}
}
__global__ void rev_output_pred_map_kernel(double *sub_array,
double *out_and_pred, const unsigned int *map, unsigned int L_sub)
{
unsigned int stride = blockDim.x * gridDim.x;
unsigned int start = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i = start; i < L_sub; i += stride)
{
out_and_pred[map[i]] = sub_array[i];
}
}
__global__ void SquareErrorDerTrackerKernel(double *avg_heatmap,
double *gt_heatmap, double *delta_tracker,
unsigned int L_avg, unsigned int N_layers)
{
const unsigned int L_heat_maps = N_layers * L_avg;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
i < L_avg;
i += stride)
{
double tmp = (avg_heatmap[i] - gt_heatmap[i]) / N_layers;
for (unsigned int j = i; j < L_heat_maps; j += L_avg)
{
delta_tracker[j] = tmp;
}
}
}
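// Sliding-window sum: for every top-left placement of a width x height window
// over the A_width x A_height, n_color-channel image A, write the sum of all
// channel values inside the window to summed_Arr (laid out N_s_col x N_s_row).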
__global__ void PatchedSumImageKernel(double *A,
double *summed_Arr,
uint A_width,
uint A_height,
uint n_color,
uint width,
uint height)
{
uint N_s_row = A_height - height + 1;
uint N_s_col = A_width - width + 1;
uint stride_x = blockDim.x * gridDim.x;
uint stride_y = blockDim.y * gridDim.y;
for (uint i = threadIdx.x + blockIdx.x * blockDim.x;
i < N_s_col;
i += stride_x)
{
for (uint j = threadIdx.y + blockIdx.y * blockDim.y;
j < N_s_row;
j += stride_y)
{
double sum = 0.;
for (uint k = 0; k < width; k++)
{
for (uint l = 0; l < height; l++)
{
for (uint m = 0; m < n_color; m++)
sum += A[(i + k) * n_color + (j + l) * n_color * A_width + m];
}
}
summed_Arr[i + j * N_s_col] = sum;
}
}
}
|
3fbd19fde277b4dab49d6f5001fd538d4bdd22ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_cuckoo.cuh"
#include "macros.h"
#include <random>
#include <thrust/device_ptr.h>
#include <thrust/copy.h>
#include <thrust/gather.h>
#include <thrust/scan.h>
#include <thrust/device_vector.h>
__device__ int hashFunctionDev(int value, int size, int num)
{
return (0xFAB011991 ^ num + num * value) % (size+1);
}
__global__ void cuckooRefillStencilKernel(int2* values, int values_size, bool* stencil, int stencil_size, int hashNum)
{
unsigned long int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= values_size) return;
int2 value = values[idx];
int hash = hashFunctionDev(value.x, stencil_size, hashNum);
stencil[hash] = true;
}
__global__ void cuckooFillKernel(int2* values, int values_size, int2* hashMap, int hashMap_size, int hashNum)
{
unsigned long int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= values_size) return;
int2 value = values[idx];
int hash = hashFunctionDev(value.x, hashMap_size, hashNum);
hashMap[hash] = value;
}
__global__ void cuckooCheckKernel(int2* values, int values_size, int2* hashMap, int hashMap_size, int hashNum, bool* result)
{
unsigned long int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= values_size) return;
int2 value = values[idx];
int hash = hashFunctionDev(value.x, hashMap_size, hashNum);
result[idx] = hashMap[hash].x != value.x;
}
struct is_true
{
__host__ __device__
bool operator()(const bool x)
{
return x;
}
};
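// One insertion round of the cuckoo scheme. The intended flow is:
// 1) mark the slots this batch will claim (stencil) and copy out whatever
//    currently occupies them -- those entries are evicted;
// 2) scatter the batch into the table (the last writer wins a slot);
// 3) re-check which batch entries did not end up in their slot and hand them
//    back, together with the evicted entries, as the next collision batch.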
__host__ thrust::device_vector<int2> cuckooFillHashMap(int2* values, int size, int2* hashMap, int hashMap_size, int hashNum)
{
bool* stencil;
thrust::device_vector<int2> result_vector;
thrust::device_ptr<int2> hashMap_ptr(hashMap);
int block_size = CUCKOO_HASHING_BLOCK_SIZE;
int block_cnt = (size + block_size - 1) / block_size;
int stencil_size = hashMap_size;
CUDA_CALL( hipMalloc((void**)&stencil, stencil_size*sizeof(bool)) );
CUDA_CALL( hipMemset(stencil, 0, stencil_size*sizeof(bool)) );
thrust::device_ptr<bool> stencil_ptr(stencil);
hipLaunchKernelGGL(( cuckooRefillStencilKernel), dim3(block_size), dim3(block_cnt), 0, 0, values, size, stencil, stencil_size, hashNum);
thrust::copy_if(hashMap_ptr, hashMap_ptr + hashMap_size, stencil_ptr, result_vector.begin(), is_true());
CUDA_CALL( hipFree(stencil) );
hipLaunchKernelGGL(( cuckooFillKernel), dim3(block_size), dim3(block_cnt), 0, 0, values, size, hashMap, hashMap_size, hashNum);
CUDA_CALL( hipMalloc((void**)&stencil, size*sizeof(bool)) );
CUDA_CALL( hipMemset(stencil, 0, size*sizeof(bool)) );
hipDeviceSynchronize();
hipLaunchKernelGGL(( cuckooCheckKernel), dim3(block_size), dim3(block_cnt), 0, 0, values, size, hashMap, hashMap_size, hashNum, stencil);
hipDeviceSynchronize();
thrust::device_ptr<int2> values_ptr(values);
stencil_ptr = thrust::device_pointer_cast(stencil);
thrust::copy_if(values_ptr, values_ptr + size, stencil_ptr, result_vector.begin(), is_true());
CUDA_CALL( hipFree(stencil) );
return result_vector;
}
__host__ int2 genSeeds()
{
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> dis(1, 6);
int seed_1 = dis(gen);
int seed_2 = dis(gen);
return make_int2(seed_1, seed_2);
}
int2** cuckooHash(int2* values, int in_size, int2& out_size, int2& out_seeds)
{
out_seeds = genSeeds();
int2** hashMaps = new int2*[NUM_HASHES];
int hashMap_size = SLOTS_COEF * in_size;
for(int i = 0; i < NUM_HASHES; i++)
{
CUDA_CALL( hipMalloc((void**)&hashMaps[i], hashMap_size * sizeof(int2)) );
CUDA_CALL( hipMemset(hashMaps[i], 0xFF, hashMap_size * sizeof(int2)) ); // free slot has key and value equal 0xFFFFFFFF
}
auto collisions = cuckooFillHashMap(values, in_size, hashMaps[0], hashMap_size, out_seeds.x);
int i = 1;
while(collisions.size())
{
collisions = cuckooFillHashMap(collisions.data().get(), collisions.size(), hashMaps[i], hashMap_size, out_seeds.y);
i = (i+1)%NUM_HASHES;
}
out_size.x = NUM_HASHES;
out_size.y = hashMap_size;
return hashMaps;
}
int2** cuckooHash(int2* values, int in_size, int2** hashMaps, int2& hashMap_size, int2 seeds)
{
auto collisions = cuckooFillHashMap(values, in_size, hashMaps[0], hashMap_size.y, seeds.x);
int i = 1;
while(collisions.size())
{
collisions = cuckooFillHashMap(collisions.data().get(), collisions.size(), hashMaps[i], hashMap_size.y, i == 1 ? seeds.y : seeds.x);
i = (i+1)%NUM_HASHES;
}
return hashMaps;
}
| 3fbd19fde277b4dab49d6f5001fd538d4bdd22ca.cu | #include "gpu_cuckoo.cuh"
#include "macros.h"
#include <random>
#include <thrust/device_ptr.h>
#include <thrust/copy.h>
#include <thrust/gather.h>
#include <thrust/scan.h>
#include <thrust/device_vector.h>
__device__ int hashFunctionDev(int value, int size, int num)
{
return (0xFAB011991 ^ num + num * value) % (size+1);
}
__global__ void cuckooRefillStencilKernel(int2* values, int values_size, bool* stencil, int stencil_size, int hashNum)
{
unsigned long int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= values_size) return;
int2 value = values[idx];
int hash = hashFunctionDev(value.x, stencil_size, hashNum);
stencil[hash] = true;
}
__global__ void cuckooFillKernel(int2* values, int values_size, int2* hashMap, int hashMap_size, int hashNum)
{
unsigned long int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= values_size) return;
int2 value = values[idx];
int hash = hashFunctionDev(value.x, hashMap_size, hashNum);
hashMap[hash] = value;
}
__global__ void cuckooCheckKernel(int2* values, int values_size, int2* hashMap, int hashMap_size, int hashNum, bool* result)
{
unsigned long int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= values_size) return;
int2 value = values[idx];
int hash = hashFunctionDev(value.x, hashMap_size, hashNum);
result[idx] = hashMap[hash].x != value.x;
}
struct is_true
{
__host__ __device__
bool operator()(const bool x)
{
return x;
}
};
__host__ thrust::device_vector<int2> cuckooFillHashMap(int2* values, int size, int2* hashMap, int hashMap_size, int hashNum)
{
bool* stencil;
thrust::device_vector<int2> result_vector;
thrust::device_ptr<int2> hashMap_ptr(hashMap);
int block_size = CUCKOO_HASHING_BLOCK_SIZE;
int block_cnt = (size + block_size - 1) / block_size;
int stencil_size = hashMap_size;
CUDA_CALL( cudaMalloc((void**)&stencil, stencil_size*sizeof(bool)) );
CUDA_CALL( cudaMemset(stencil, 0, stencil_size*sizeof(bool)) );
thrust::device_ptr<bool> stencil_ptr(stencil);
cuckooRefillStencilKernel<<<block_size, block_cnt>>>(values, size, stencil, stencil_size, hashNum);
thrust::copy_if(hashMap_ptr, hashMap_ptr + hashMap_size, stencil_ptr, result_vector.begin(), is_true());
CUDA_CALL( cudaFree(stencil) );
cuckooFillKernel<<<block_size, block_cnt>>>(values, size, hashMap, hashMap_size, hashNum);
CUDA_CALL( cudaMalloc((void**)&stencil, size*sizeof(bool)) );
CUDA_CALL( cudaMemset(stencil, 0, size*sizeof(bool)) );
cudaDeviceSynchronize();
cuckooCheckKernel<<<block_size, block_cnt>>>(values, size, hashMap, hashMap_size, hashNum, stencil);
cudaDeviceSynchronize();
thrust::device_ptr<int2> values_ptr(values);
stencil_ptr = thrust::device_pointer_cast(stencil);
thrust::copy_if(values_ptr, values_ptr + size, stencil_ptr, result_vector.begin(), is_true());
CUDA_CALL( cudaFree(stencil) );
return result_vector;
}
__host__ int2 genSeeds()
{
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> dis(1, 6);
int seed_1 = dis(gen);
int seed_2 = dis(gen);
return make_int2(seed_1, seed_2);
}
int2** cuckooHash(int2* values, int in_size, int2& out_size, int2& out_seeds)
{
out_seeds = genSeeds();
int2** hashMaps = new int2*[NUM_HASHES];
int hashMap_size = SLOTS_COEF * in_size;
for(int i = 0; i < NUM_HASHES; i++)
{
CUDA_CALL( cudaMalloc((void**)&hashMaps[i], hashMap_size * sizeof(int2)) );
CUDA_CALL( cudaMemset(hashMaps[i], 0xFF, hashMap_size * sizeof(int2)) ); // free slot has key and value equal 0xFFFFFFFF
}
auto collisions = cuckooFillHashMap(values, in_size, hashMaps[0], hashMap_size, out_seeds.x);
int i = 1;
while(collisions.size())
{
collisions = cuckooFillHashMap(collisions.data().get(), collisions.size(), hashMaps[i], hashMap_size, out_seeds.y);
i = (i+1)%NUM_HASHES;
}
out_size.x = NUM_HASHES;
out_size.y = hashMap_size;
return hashMaps;
}
int2** cuckooHash(int2* values, int in_size, int2** hashMaps, int2& hashMap_size, int2 seeds)
{
auto collisions = cuckooFillHashMap(values, in_size, hashMaps[0], hashMap_size.y, seeds.x);
int i = 1;
while(collisions.size())
{
collisions = cuckooFillHashMap(collisions.data().get(), collisions.size(), hashMaps[i], hashMap_size.y, i == 1 ? seeds.y : seeds.x);
i = (i+1)%NUM_HASHES;
}
return hashMaps;
}
|
9e0abe2324ac08d2553d1ef953ba4f7b673dfa51.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "KDE.h"
__device__ float epanechnikowKernelCUDA(float x)
{
float res = 0;
if(x <= 1 && x >= -1)
{
res = 3*(1-x*x)/4;
}
return res;
}
__device__ float uniformKernelCUDA(float x)
{
float res = 0;
if(x <= 1 && x >= -1)
{
res = 1.0/2;
}
return res;
}
__device__ float gaussianKernelCUDA(float x)
{
float res = 0;
res = 1.0/sqrt(2*M_PI)*exp(-1.0/2*x*x);
return res;
}
__device__ float getSingleCUDA(float x, float h, int dataSize, float* data, kernel_type kernelType)
{
float result = 0;
for(int i = 0;i<dataSize;i++)
{
float kernel_par =(x-data[i])/h;
if(kernelType == epanechnikov)
{
result += epanechnikowKernelCUDA(kernel_par);
}
else if(kernelType == uniform)
{
result += uniformKernelCUDA(kernel_par);
}
else if(kernelType == gaussian)
{
result += gaussianKernelCUDA(kernel_par);
}
}
result /= (dataSize*h);
return result;
}
__global__ void getRangeCUDA(float* resultTable, int resultSize,float start, float step, float* data, int dataSize, float h, kernel_type kernelType)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < resultSize)
{
float val = start + idx*step;
resultTable[idx] = getSingleCUDA(val,h,dataSize,data,kernelType);
}
}
KDE::KDE(Data* inputData, Data* outputData, float start, float stop, float h, kernel_type kernelType)
{
_kernelType = kernelType;
_inputData = inputData;
_outputData = outputData;
_resultStart = start;
_resultStop = stop;
_h = h;
}
KDE::~KDE()
{
}
float KDE::epanechnikowKernel(float x)
{
float res = 0;
if(x <= 1 && x >= -1)
{
res = 3*(1-x*x)/4;
}
return res;
}
float KDE::uniformKernel(float x)
{
float res = 0;
if(x <= 1 && x >= -1)
{
res = 1.0/2;
}
return res;
}
float KDE::gaussianKernel(float x)
{
float res = 0;
res = 1.0/sqrt(2*M_PI)*exp(-1.0/2*x*x);
return res;
}
float KDE::getSingle(float x, float* data)
{
float res = 0;
for(int i = 0;i<_inputData->getSampleSize();i++)
{
float kernel_par =(x-data[i])/_h;
if(_kernelType == epanechnikov)
{
res += epanechnikowKernel(kernel_par);
}
else if(_kernelType == uniform)
{
res += uniformKernel(kernel_par);
}
else if(_kernelType == gaussian)
{
res += gaussianKernel(kernel_par);
}
}
res /= (_inputData->getSampleSize()*_h);
return res;
}
void KDE::getResult()
{
float step = (_resultStop-_resultStart)/(_outputData->getSampleSize()-1);
for(int j = 0;j<_inputData->getSampleQuantity();j++)
{
float* tempInputData = _inputData->getDataPtr()[j];
float* tempOutputData = _outputData->getDataPtr()[j];
for(int i = 0;i<_outputData->getSampleSize();i++)
{
tempOutputData[i] = getSingle(_resultStart+step*i, tempInputData);
}
//normalize
double sum = 0;
for(int i = 0;i<_outputData->getSampleSize();i++)
{
sum += tempOutputData[i];
}
sum *= step;
for(int i = 0;i<_outputData->getSampleSize();i++)
{
tempOutputData[i] /= sum;
}
}
_outputData->compute();
}
void KDE::getResultCUDA()
{
float step = (_resultStop-_resultStart)/(_outputData->getSampleSize()-1);
int inputDataSize = _inputData->getSampleSize();
int outputDataSize = _outputData->getSampleSize();
int block_size = 512;
int block_num = (outputDataSize + block_size - 1)/block_size;
float *cudaDataTable;
float *cudaResultTable;
hipMalloc((void**)&cudaDataTable,sizeof(float)*inputDataSize);
hipMalloc((void**)&cudaResultTable,sizeof(float)*outputDataSize);
for(int j = 0;j<_inputData->getSampleQuantity();j++)
{
float* tempInputData = _inputData->getDataPtr()[j];
float* tempOutputData = _outputData->getDataPtr()[j];
hipMemcpy(cudaDataTable,tempInputData,sizeof(float)*inputDataSize,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( getRangeCUDA) , dim3(block_num),dim3(block_size), 0, 0, cudaResultTable, outputDataSize, _resultStart, step, cudaDataTable, inputDataSize, _h, _kernelType);
hipMemcpy(tempOutputData,cudaResultTable,sizeof(float)*outputDataSize,hipMemcpyDeviceToHost);
//normalize
double sum = 0;
for(int i = 0;i<_outputData->getSampleSize();i++)
{
sum += tempOutputData[i];
}
sum *= step;
for(int i = 0;i<_outputData->getSampleSize();i++)
{
tempOutputData[i] /= sum;
}
}
hipFree(cudaDataTable);
hipFree(cudaResultTable);
_outputData->compute();
}
void KDE::saveResultToFile(std::string filename)
{
std::ofstream file;
file.open(filename);
float step = (_resultStop-_resultStart)/(_outputData->getSampleSize()-1);
for(int i = 0;i<_outputData->getSampleSize();i++)
{
float mean = _outputData->getMean()[i];
float dev = _outputData->getStdDev()[i]/2;
float x = _resultStart+step*i;
float y = Generator::orginalDoubleGauss(x);
file << x << " " << y << " " << mean-dev << " " << mean << " " << mean+dev << std::endl;
}
file.close();
}
float KDE::getChiSquaredVal()
{
float step = (_resultStop-_resultStart)/(_outputData->getSampleSize()-1);
double sum = 0;
for(int i = 0;i<_outputData->getSampleSize();i++)
{
float mean = _outputData->getMean()[i];
float dev = _outputData->getStdDev()[i];
float x = _resultStart+step*i;
float y = Generator::orginalDoubleGauss(x);
double temp = (mean-y)*(mean-y)/(dev*dev);
sum += temp;
}
return sum;
}
void KDE::saveHistToFile(std::string filename, float* data, int dataSize, int bids)
{
std::ofstream file;
file.open(filename);
float min = data[0];
float max = data[0];
for(int i = 1;i<dataSize;i++)
{
if(data[i] > max)max = data[i];
else if(data[i] < min)min = data[i];
}
double stepSize = (double)(max-min)/bids;
for(int i = 0;i<bids-1;i++)
{
double minRange = min + i*stepSize;
double maxRange = min + (i+1)*stepSize;
int rangeOccurs = 0;
for(int j = 0;j<dataSize;j++)
{
if(data[j] >= minRange && data[j] < maxRange)rangeOccurs++;
}
file << (minRange+maxRange)/2 << " " << rangeOccurs << std::endl;
}
file.close();
}
void KDE::notifyCudaAllocError()
{
std::cout << "CUDA Alloc problem" << std::endl;
_errorOccur = true;
}
void KDE::notifyCudaCpyError()
{
std::cout << "CUDA Memcpy problem" << std::endl;
_errorOccur = true;
}
| 9e0abe2324ac08d2553d1ef953ba4f7b673dfa51.cu | #include "KDE.h"
__device__ float epanechnikowKernelCUDA(float x)
{
float res = 0;
if(x <= 1 && x >= -1)
{
res = 3*(1-x*x)/4;
}
return res;
}
__device__ float uniformKernelCUDA(float x)
{
float res = 0;
if(x <= 1 && x >= -1)
{
res = 1.0/2;
}
return res;
}
__device__ float gaussianKernelCUDA(float x)
{
float res = 0;
res = 1.0/sqrt(2*M_PI)*exp(-1.0/2*x*x);
return res;
}
__device__ float getSingleCUDA(float x, float h, int dataSize, float* data, kernel_type kernelType)
{
float result = 0;
for(int i = 0;i<dataSize;i++)
{
float kernel_par =(x-data[i])/h;
if(kernelType == epanechnikov)
{
result += epanechnikowKernelCUDA(kernel_par);
}
else if(kernelType == uniform)
{
result += uniformKernelCUDA(kernel_par);
}
else if(kernelType == gaussian)
{
result += gaussianKernelCUDA(kernel_par);
}
}
result /= (dataSize*h);
return result;
}
__global__ void getRangeCUDA(float* resultTable, int resultSize,float start, float step, float* data, int dataSize, float h, kernel_type kernelType)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < resultSize)
{
float val = start + idx*step;
resultTable[idx] = getSingleCUDA(val,h,dataSize,data,kernelType);
}
}
KDE::KDE(Data* inputData, Data* outputData, float start, float stop, float h, kernel_type kernelType)
{
_kernelType = kernelType;
_inputData = inputData;
_outputData = outputData;
_resultStart = start;
_resultStop = stop;
_h = h;
}
KDE::~KDE()
{
}
float KDE::epanechnikowKernel(float x)
{
float res = 0;
if(x <= 1 && x >= -1)
{
res = 3*(1-x*x)/4;
}
return res;
}
float KDE::uniformKernel(float x)
{
float res = 0;
if(x <= 1 && x >= -1)
{
res = 1.0/2;
}
return res;
}
float KDE::gaussianKernel(float x)
{
float res = 0;
res = 1.0/sqrt(2*M_PI)*exp(-1.0/2*x*x);
return res;
}
float KDE::getSingle(float x, float* data)
{
float res = 0;
for(int i = 0;i<_inputData->getSampleSize();i++)
{
float kernel_par =(x-data[i])/_h;
if(_kernelType == epanechnikov)
{
res += epanechnikowKernel(kernel_par);
}
else if(_kernelType == uniform)
{
res += uniformKernel(kernel_par);
}
else if(_kernelType == gaussian)
{
res += gaussianKernel(kernel_par);
}
}
res /= (_inputData->getSampleSize()*_h);
return res;
}
void KDE::getResult()
{
float step = (_resultStop-_resultStart)/(_outputData->getSampleSize()-1);
for(int j = 0;j<_inputData->getSampleQuantity();j++)
{
float* tempInputData = _inputData->getDataPtr()[j];
float* tempOutputData = _outputData->getDataPtr()[j];
for(int i = 0;i<_outputData->getSampleSize();i++)
{
tempOutputData[i] = getSingle(_resultStart+step*i, tempInputData);
}
//normalize
double sum = 0;
for(int i = 0;i<_outputData->getSampleSize();i++)
{
sum += tempOutputData[i];
}
sum *= step;
for(int i = 0;i<_outputData->getSampleSize();i++)
{
tempOutputData[i] /= sum;
}
}
_outputData->compute();
}
void KDE::getResultCUDA()
{
float step = (_resultStop-_resultStart)/(_outputData->getSampleSize()-1);
int inputDataSize = _inputData->getSampleSize();
int outputDataSize = _outputData->getSampleSize();
int block_size = 512;
int block_num = (outputDataSize + block_size - 1)/block_size;
float *cudaDataTable;
float *cudaResultTable;
cudaMalloc((void**)&cudaDataTable,sizeof(float)*inputDataSize);
cudaMalloc((void**)&cudaResultTable,sizeof(float)*outputDataSize);
for(int j = 0;j<_inputData->getSampleQuantity();j++)
{
float* tempInputData = _inputData->getDataPtr()[j];
float* tempOutputData = _outputData->getDataPtr()[j];
cudaMemcpy(cudaDataTable,tempInputData,sizeof(float)*inputDataSize,cudaMemcpyHostToDevice);
getRangeCUDA <<<block_num,block_size>>>(cudaResultTable, outputDataSize, _resultStart, step, cudaDataTable, inputDataSize, _h, _kernelType);
cudaMemcpy(tempOutputData,cudaResultTable,sizeof(float)*outputDataSize,cudaMemcpyDeviceToHost);
//normalize
double sum = 0;
for(int i = 0;i<_outputData->getSampleSize();i++)
{
sum += tempOutputData[i];
}
sum *= step;
for(int i = 0;i<_outputData->getSampleSize();i++)
{
tempOutputData[i] /= sum;
}
}
cudaFree(cudaDataTable);
cudaFree(cudaResultTable);
_outputData->compute();
}
void KDE::saveResultToFile(std::string filename)
{
std::ofstream file;
file.open(filename);
float step = (_resultStop-_resultStart)/(_outputData->getSampleSize()-1);
for(int i = 0;i<_outputData->getSampleSize();i++)
{
float mean = _outputData->getMean()[i];
float dev = _outputData->getStdDev()[i]/2;
float x = _resultStart+step*i;
float y = Generator::orginalDoubleGauss(x);
file << x << " " << y << " " << mean-dev << " " << mean << " " << mean+dev << std::endl;
}
file.close();
}
float KDE::getChiSquaredVal()
{
float step = (_resultStop-_resultStart)/(_outputData->getSampleSize()-1);
double sum = 0;
for(int i = 0;i<_outputData->getSampleSize();i++)
{
float mean = _outputData->getMean()[i];
float dev = _outputData->getStdDev()[i];
float x = _resultStart+step*i;
float y = Generator::orginalDoubleGauss(x);
double temp = (mean-y)*(mean-y)/(dev*dev);
sum += temp;
}
return sum;
}
void KDE::saveHistToFile(std::string filename, float* data, int dataSize, int bids)
{
std::ofstream file;
file.open(filename);
float min = data[0];
float max = data[0];
for(int i = 1;i<dataSize;i++)
{
if(data[i] > max)max = data[i];
else if(data[i] < min)min = data[i];
}
double stepSize = (double)(max-min)/bids;
for(int i = 0;i<bids-1;i++)
{
double minRange = min + i*stepSize;
double maxRange = min + (i+1)*stepSize;
int rangeOccurs = 0;
for(int j = 0;j<dataSize;j++)
{
if(data[j] >= minRange && data[j] < maxRange)rangeOccurs++;
}
file << (minRange+maxRange)/2 << " " << rangeOccurs << std::endl;
}
file.close();
}
void KDE::notifyCudaAllocError()
{
std::cout << "CUDA Alloc problem" << std::endl;
_errorOccur = true;
}
void KDE::notifyCudaCpyError()
{
std::cout << "CUDA Memcpy problem" << std::endl;
_errorOccur = true;
}
|
e12338d2846b776251af24b590b6d7d49eb37a0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Device.h"
#include <iostream>
#include "MonteCarlo.h"
using std::cout;
using std::endl;
extern __global__ void monteCarlo(hiprandState_t* tabDevGenerator, int nbFleches, float m,int* ptrDevNx);
extern __global__ void setup_kernel_rand(hiprandState_t* tabDevGenerator, int deviceId);
MonteCarlo::MonteCarlo(int nbFleches,int m,const Grid& grid) :
nbFleches(nbFleches)
{
this->db=grid.db;
this->dg=grid.dg;
this->m=m;
this->sizeOctetPi = sizeof(int);
Device::malloc(&ptrDevNx, sizeOctetPi);
Device::memclear(ptrDevNx, sizeOctetPi);
this->sizeOctetSM = db.x * db.y * db.z * sizeof(int);
int nbThread =grid.threadCounts();
size_t sizeOctet= nbThread*sizeof(hiprandState_t) ;
Device::malloc(&ptrDevGenerator, sizeOctet);
Device::memclear(ptrDevGenerator, sizeOctet);
hipLaunchKernelGGL(( setup_kernel_rand), dim3(dg),dim3(db), 0, 0, ptrDevGenerator, Device::getDeviceId());
}
MonteCarlo::~MonteCarlo(void)
{
Device::free(ptrDevNx);
Device::free(ptrDevGenerator);
}
void MonteCarlo::process()
{
//Device::lastCudaError("Slice (before)"); // temp debug
Device ::printCurrent() ;
hipLaunchKernelGGL(( monteCarlo), dim3(dg),dim3(db), sizeOctetSM, 0, ptrDevGenerator, nbFleches,m,ptrDevNx);// asynchronous launch
//Device::lastCudaError("Slice (after)"); // temp debug
Device::memcpyDToH(&nbFlechesEnDessous, ptrDevNx, sizeOctetPi);
pi=(float)nbFlechesEnDessous/nbFleches*m;
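// hit ratio times the sampling-box height m gives the Monte Carlo estimate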
//hipMemcpy(&pi, ptrDevNx, sizeOctetPi, hipMemcpyDeviceToHost); // implicit synchronization barrier
//pi /= nbFleches;
}
int MonteCarlo::getCountFleches()
{
return this->nbFlechesEnDessous;
}
float MonteCarlo::getPi()
{
return this->pi;
}
| e12338d2846b776251af24b590b6d7d49eb37a0a.cu | #include "Device.h"
#include <iostream>
#include "MonteCarlo.h"
using std::cout;
using std::endl;
extern __global__ void monteCarlo(curandState* tabDevGenerator, int nbFleches, float m,int* ptrDevNx);
extern __global__ void setup_kernel_rand(curandState* tabDevGenerator, int deviceId);
MonteCarlo::MonteCarlo(int nbFleches,int m,const Grid& grid) :
nbFleches(nbFleches)
{
this->db=grid.db;
this->dg=grid.dg;
this->m=m;
this->sizeOctetPi = sizeof(int);
Device::malloc(&ptrDevNx, sizeOctetPi);
Device::memclear(ptrDevNx, sizeOctetPi);
this->sizeOctetSM = db.x * db.y * db.z * sizeof(int);
int nbThread =grid.threadCounts();
size_t sizeOctet= nbThread*sizeof(curandState) ;
Device::malloc(&ptrDevGenerator, sizeOctet);
Device::memclear(ptrDevGenerator, sizeOctet);
setup_kernel_rand<<<dg,db>>>(ptrDevGenerator, Device::getDeviceId());
}
MonteCarlo::~MonteCarlo(void)
{
Device::free(ptrDevNx);
Device::free(ptrDevGenerator);
}
void MonteCarlo::process()
{
//Device::lastCudaError("Slice (before)"); // temp debug
Device ::printCurrent() ;
monteCarlo<<<dg,db, sizeOctetSM>>>(ptrDevGenerator, nbFleches,m,ptrDevNx);// asynchronous launch
//Device::lastCudaError("Slice (after)"); // temp debug
Device::memcpyDToH(&nbFlechesEnDessous, ptrDevNx, sizeOctetPi);
pi=(float)nbFlechesEnDessous/nbFleches*m;
//cudaMemcpy(&pi, ptrDevNx, sizeOctetPi, cudaMemcpyDeviceToHost); // implicit synchronization barrier
//pi /= nbFleches;
}
int MonteCarlo::getCountFleches()
{
return this->nbFlechesEnDessous;
}
float MonteCarlo::getPi()
{
return this->pi;
}
|
bf8736444d31e60fe602195adc52673b5296bbcd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM
#include "dragon/core/context_cuda.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernel {
namespace {
template <typename T>
__global__ void
_NesterovUpdate(const int nthreads, const T lr, const T momentum, T* g, T* m);
template <>
__global__ void _NesterovUpdate<float>(
const int nthreads,
const float lr,
const float momentum,
float* g,
float* m) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
float mi = m[i];
float mi_new = m[i] = momentum * mi + lr * g[i];
g[i] = fmaf(momentum, mi_new - mi, mi_new);
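// i.e. g_out = m_new + momentum * (m_new - m): the Nesterov look-ahead step,
// expressed as a single fused multiply-add.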
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
template <>
void NesterovUpdate<float, CUDAContext>(
const int count,
const float lr,
const float momentum,
float* g,
float* m,
CUDAContext* ctx) {
hipLaunchKernelGGL(( _NesterovUpdate), dim3(CUDA_BLOCKS(count)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
count, lr, momentum, g, m);
}
} // namespace kernel
} // namespace dragon
#endif // USE_ROCM
| bf8736444d31e60fe602195adc52673b5296bbcd.cu | #ifdef USE_CUDA
#include "dragon/core/context_cuda.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernel {
namespace {
template <typename T>
__global__ void
_NesterovUpdate(const int nthreads, const T lr, const T momentum, T* g, T* m);
template <>
__global__ void _NesterovUpdate<float>(
const int nthreads,
const float lr,
const float momentum,
float* g,
float* m) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
float mi = m[i];
float mi_new = m[i] = momentum * mi + lr * g[i];
g[i] = fmaf(momentum, mi_new - mi, mi_new);
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
template <>
void NesterovUpdate<float, CUDAContext>(
const int count,
const float lr,
const float momentum,
float* g,
float* m,
CUDAContext* ctx) {
_NesterovUpdate<<<CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
count, lr, momentum, g, m);
}
} // namespace kernel
} // namespace dragon
#endif // USE_CUDA
|
7463dfc20f4e925d576773f6525f18fdf8a1b1c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "multi1.h"
__global__ void k1_incr(float *data, float val) {
data[threadIdx.x] += val;
}
| 7463dfc20f4e925d576773f6525f18fdf8a1b1c1.cu | #include "multi1.h"
__global__ void k1_incr(float *data, float val) {
data[threadIdx.x] += val;
}
|
6e3e05ca0327b20259dfeb6b62667e5e2f765ed2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by root on 23/03/2020.
//
#include "../Matrix.cuh"
#include <iostream>
__global__ void matrixSubScalarInverse(double *a, double b, double *c, int cr, int cc){
int x = blockIdx.x * blockDim.x + threadIdx.x; // col
int y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
c[y * cc + x] = b-a[y * cc + x];
}
}
Matrix Matrix::subScalarInverse(double m){
static double* c;
c = (double*) calloc(this->Rows*this->Columns,sizeof(double));
//Define the device (video) memory pointers
double *d_a, *d_c;
//Define the size of each matrix and scalar in memory
long aSize = this->Rows*this->Columns*sizeof(double);
long cSize = this->Rows*this->Columns*sizeof(double);
//Allocate space in video memory
hipMalloc((void**)&d_a, aSize);
hipMalloc((void**)&d_c, cSize);
//Copy the matrix to the allocated video memory (the scalar is passed directly to the kernel)
hipMemcpy(d_a, this->Value, aSize, hipMemcpyHostToDevice);
//Define the launch dimensions
dim3 dimBlock(32,32); // 32x32 -> 1024 Threads
dim3 dimGrid(this->Rows,this->Columns);
//Launch the kernel (element-wise scalar minus matrix)
hipLaunchKernelGGL(( matrixSubScalarInverse), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, m, d_c, this->Rows, this->Columns);
//Copy the result back
hipMemcpy(c, d_c, cSize, hipMemcpyDeviceToHost);
//Free the video memory
hipFree(d_a);
hipFree(d_c);
//Return the result
return {this->Columns, this->Rows, c};
}
| 6e3e05ca0327b20259dfeb6b62667e5e2f765ed2.cu | //
// Created by root on 23/03/2020.
//
#include "../Matrix.cuh"
#include <iostream>
__global__ void matrixSubScalarInverse(double *a, double b, double *c, int cr, int cc){
int x = blockIdx.x * blockDim.x + threadIdx.x; // col
int y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
c[y * cc + x] = b-a[y * cc + x];
}
}
Matrix Matrix::subScalarInverse(double m){
static double* c;
c = (double*) calloc(this->Rows*this->Columns,sizeof(double));
//Define the device (video) memory pointers
double *d_a, *d_c;
//Define the size of each matrix and scalar in memory
long aSize = this->Rows*this->Columns*sizeof(double);
long cSize = this->Rows*this->Columns*sizeof(double);
//Allocate space in video memory
cudaMalloc((void**)&d_a, aSize);
cudaMalloc((void**)&d_c, cSize);
//Copy the matrix to the allocated video memory (the scalar is passed directly to the kernel)
cudaMemcpy(d_a, this->Value, aSize, cudaMemcpyHostToDevice);
//Define the launch dimensions
dim3 dimBlock(32,32); // 32x32 -> 1024 Threads
dim3 dimGrid(this->Rows,this->Columns);
//Launch the kernel (element-wise scalar minus matrix)
matrixSubScalarInverse<<<dimGrid, dimBlock>>>(d_a, m, d_c, this->Rows, this->Columns);
//Copy the result back
cudaMemcpy(c, d_c, cSize, cudaMemcpyDeviceToHost);
//Free the video memory
cudaFree(d_a);
cudaFree(d_c);
//Return the result
return {this->Columns, this->Rows, c};
}
|
1cee557fdcf1c17f1aeeb6e44c56ccbd6e1fb90f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void kernel_sumar(int *a, int *b, int *c, int *resultado)
/*
Stores the sum a + b + c in *resultado.
*/
{
*resultado = *a + *b + *c;
}
void sumar_en_cuda(int a, int b, int c, int* resultado)
{
// Device (GPU) variables:
int *dev_a;
int *dev_b;
int *dev_c;
// Result variable:
int *dev_resultado;
// Allocate DEVICE memory for the 3 ints. Note: (void **) is a pointer cast.
hipMalloc((void **)&dev_a, sizeof(int));
hipMalloc((void **)&dev_b, sizeof(int));
hipMalloc((void **)&dev_c, sizeof(int));
hipMalloc((void **)&dev_resultado, sizeof(int));
// Copy contents from HOST to DEVICE (no need to copy resultado, it has no value yet).
hipMemcpy(dev_a, &a, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, &b, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_c, &c, sizeof(int), hipMemcpyHostToDevice);
// Launch the kernel:
hipLaunchKernelGGL(( kernel_sumar) , dim3(100), dim3(100) , 0, 0, dev_a, dev_b, dev_c, dev_resultado);
// Wait for the kernel to finish executing:
hipDeviceSynchronize();
// Copy from DEVICE to HOST (store the result).
hipMemcpy(resultado, dev_resultado, sizeof(int), hipMemcpyDeviceToHost);
// Free DEVICE memory:
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
hipFree(dev_resultado);
}
int main()
{
// Variable where the result will be stored.
int resultado;
// Call the addition function:
sumar_en_cuda(3, 5, 8, &resultado);
// Print the result:
printf("El resultado es %d", resultado);
return 0;
}
| 1cee557fdcf1c17f1aeeb6e44c56ccbd6e1fb90f.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void kernel_sumar(int *a, int *b, int *c, int *resultado)
/*
Stores the sum a + b + c in *resultado.
*/
{
*resultado = *a + *b + *c;
}
void sumar_en_cuda(int a, int b, int c, int* resultado)
{
// Device (GPU) variables:
int *dev_a;
int *dev_b;
int *dev_c;
// Result variable:
int *dev_resultado;
// Allocate DEVICE memory for the 3 ints. Note: (void **) is a pointer cast.
cudaMalloc((void **)&dev_a, sizeof(int));
cudaMalloc((void **)&dev_b, sizeof(int));
cudaMalloc((void **)&dev_c, sizeof(int));
cudaMalloc((void **)&dev_resultado, sizeof(int));
// Copy contents from HOST to DEVICE (no need to copy resultado, it has no value yet).
cudaMemcpy(dev_a, &a, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, &b, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_c, &c, sizeof(int), cudaMemcpyHostToDevice);
// Launch the kernel:
kernel_sumar <<<100, 100 >>> (dev_a, dev_b, dev_c, dev_resultado);
// Wait for the kernel to finish executing:
cudaDeviceSynchronize();
// Copy from DEVICE to HOST (store the result).
cudaMemcpy(resultado, dev_resultado, sizeof(int), cudaMemcpyDeviceToHost);
// Free DEVICE memory:
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaFree(dev_resultado);
}
int main()
{
// Variable where the result will be stored.
int resultado;
// Call the addition function:
sumar_en_cuda(3, 5, 8, &resultado);
// Print the result:
printf("El resultado es %d", resultado);
return 0;
}
|
f67226e084f68d0de7459a2a317912ce9a80907b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
// every multiprocessor handles one BCSR-block to copy from A
__global__ void
zbcsrvalcpy_kernel(
int size_b,
magma_int_t num_blocks,
magmaDoubleComplex **Aval,
magmaDoubleComplex **Bval )
{
if(blockIdx.x*65535+blockIdx.y < num_blocks){
magmaDoubleComplex *dA = Aval[ blockIdx.x*65535+blockIdx.y ];
magmaDoubleComplex *dB = Bval[ blockIdx.x*65535+blockIdx.y ];
int i = threadIdx.x;
while( i<size_b*size_b ){
dB[i] = dA[i];
i+=BLOCK_SIZE;
}
}
}
// every multiprocessor handles one BCSR-block to initialize with 0
__global__ void
zbcsrvalzro_kernel(
int size_b,
magma_int_t num_blocks,
magmaDoubleComplex **Bval )
{
if(blockIdx.x*65535+blockIdx.y < num_blocks){
magmaDoubleComplex *dB = Bval[ blockIdx.x*65535+blockIdx.y ];
int i = threadIdx.x;
//dB += i;
while( i<size_b*size_b ){
dB[i] = MAGMA_Z_MAKE(0.0, 0.0);
i+=BLOCK_SIZE;
}
}
}
/**
Purpose
-------
For a Block-CSR ILU factorization, this routine copies the filled blocks
from the original matrix A and initializes the blocks that will later be
filled in the factorization process with zeros.
Arguments
---------
@param[in]
size_b magma_int_t
blocksize in BCSR
@param[in]
num_blocks magma_int_t
number of nonzero blocks
@param[in]
num_zblocks magma_int_t
number of zero-blocks (will later be filled)
@param[in]
Aval magmaDoubleComplex_ptr *
pointers to the nonzero blocks in A
@param[in]
Bval magmaDoubleComplex_ptr *
pointers to the nonzero blocks in B
@param[in]
Bval2 magmaDoubleComplex_ptr *
pointers to the zero blocks in B
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
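/*
Illustrative call sketch (an assumption added for clarity, not part of the
MAGMA documentation): once the surrounding BCSR setup code has produced the
block-pointer arrays dAval (filled blocks of A), dBval (matching blocks of B)
and dBval2 (blocks of B to be filled later), one copy/zero pass is simply
magma_zbcsrvalcpy( size_b, num_blocks, num_zblocks, dAval, dBval, dBval2, queue );
*/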
extern "C" magma_int_t
magma_zbcsrvalcpy(
magma_int_t size_b,
magma_int_t num_blocks,
magma_int_t num_zblocks,
magmaDoubleComplex_ptr *Aval,
magmaDoubleComplex_ptr *Bval,
magmaDoubleComplex_ptr *Bval2,
magma_queue_t queue )
{
dim3 dimBlock( BLOCK_SIZE, 1, 1 );
// the grids are adapted to the number of nonzero/zero blocks
// the upper block-number the kernels can handle is 65535*65535
int dimgrid1 = 65535;
int dimgrid2 = magma_ceildiv( num_blocks, 65535 );
int dimgrid3 = magma_ceildiv( num_zblocks, 65535 );
dim3 dimGrid( dimgrid2, dimgrid1, 1 );
hipLaunchKernelGGL(( zbcsrvalcpy_kernel), dim3(dimGrid),dim3(dimBlock), 0, queue->cuda_stream() ,
size_b, num_blocks, Aval, Bval );
dim3 dimGrid2( dimgrid3, dimgrid1, 1 );
hipLaunchKernelGGL(( zbcsrvalzro_kernel), dim3(dimGrid2),dim3(dimBlock), 0, queue->cuda_stream() ,
size_b, num_zblocks, Bval2 );
return MAGMA_SUCCESS;
}
| f67226e084f68d0de7459a2a317912ce9a80907b.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
// every multiprocessor handles one BCSR-block to copy from A
__global__ void
zbcsrvalcpy_kernel(
int size_b,
magma_int_t num_blocks,
magmaDoubleComplex **Aval,
magmaDoubleComplex **Bval )
{
if(blockIdx.x*65535+blockIdx.y < num_blocks){
magmaDoubleComplex *dA = Aval[ blockIdx.x*65535+blockIdx.y ];
magmaDoubleComplex *dB = Bval[ blockIdx.x*65535+blockIdx.y ];
int i = threadIdx.x;
while( i<size_b*size_b ){
dB[i] = dA[i];
i+=BLOCK_SIZE;
}
}
}
// every multiprocessor handles one BCSR-block to initialize with 0
__global__ void
zbcsrvalzro_kernel(
int size_b,
magma_int_t num_blocks,
magmaDoubleComplex **Bval )
{
if(blockIdx.x*65535+blockIdx.y < num_blocks){
magmaDoubleComplex *dB = Bval[ blockIdx.x*65535+blockIdx.y ];
int i = threadIdx.x;
//dB += i;
while( i<size_b*size_b ){
dB[i] = MAGMA_Z_MAKE(0.0, 0.0);
i+=BLOCK_SIZE;
}
}
}
/**
Purpose
-------
For a Block-CSR ILU factorization, this routine copies the filled blocks
from the original matrix A and initializes the blocks that will later be
filled in the factorization process with zeros.
Arguments
---------
@param[in]
size_b magma_int_t
blocksize in BCSR
@param[in]
num_blocks magma_int_t
number of nonzero blocks
@param[in]
num_zblocks magma_int_t
number of zero-blocks (will later be filled)
@param[in]
Aval magmaDoubleComplex_ptr *
pointers to the nonzero blocks in A
@param[in]
Bval magmaDoubleComplex_ptr *
pointers to the nonzero blocks in B
@param[in]
Bval2 magmaDoubleComplex_ptr *
pointers to the zero blocks in B
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbcsrvalcpy(
magma_int_t size_b,
magma_int_t num_blocks,
magma_int_t num_zblocks,
magmaDoubleComplex_ptr *Aval,
magmaDoubleComplex_ptr *Bval,
magmaDoubleComplex_ptr *Bval2,
magma_queue_t queue )
{
dim3 dimBlock( BLOCK_SIZE, 1, 1 );
// the grids are adapted to the number of nonzero/zero blocks
// the upper block-number the kernels can handle is 65535*65535
int dimgrid1 = 65535;
int dimgrid2 = magma_ceildiv( num_blocks, 65535 );
int dimgrid3 = magma_ceildiv( num_zblocks, 65535 );
dim3 dimGrid( dimgrid2, dimgrid1, 1 );
zbcsrvalcpy_kernel<<< dimGrid,dimBlock, 0, queue->cuda_stream() >>>
( size_b, num_blocks, Aval, Bval );
dim3 dimGrid2( dimgrid3, dimgrid1, 1 );
zbcsrvalzro_kernel<<< dimGrid2,dimBlock, 0, queue->cuda_stream() >>>
( size_b, num_zblocks, Bval2 );
return MAGMA_SUCCESS;
}
|
19c10726bba9d47b7e91ac63bc97e5dd3267c598.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "CNNConvLayer.h"
using namespace std;
// This is the CPU version, please don't modify it
void convLayerCPU()
{
// declarations for bunch of indexing parameters
int fn, sli, fmy, fmx, y, x;
int ifmy, ifmx, ofmy, ofmx;
int filtIdx, inNeuIdx, outNeuIdx, outIdx;
int filtVol = FMDEPTH * FILTSIZE * FILTSIZE;
int fmArea = FMSIZE * FMSIZE;
int filtArea = FILTSIZE * FILTSIZE;
int outArea = FMSIZE/3 * FMSIZE/3;
int sum;
// Convolution
for(fn = 0; fn < FILTNUM; fn++){ //iterate through each filters
for(fmy = 0; fmy < FMSIZE; fmy += STRIDE){ //Stride through
for(fmx = 0; fmx < FMSIZE; fmx += STRIDE){ //Stride through
sum = 0;
for(sli = 0; sli < FMDEPTH; sli++){ //Iterate through depth
//Convolution
for(y = 0; y < FILTSIZE; y++){
for(x = 0; x < FILTSIZE; x++){
ifmy = fmy - FILTSIZE / 2 + y;
ifmx = fmx - FILTSIZE / 2 + x;
filtIdx = fn*filtVol + sli*filtArea + y*FILTSIZE + x;
inNeuIdx = sli*fmArea + ifmy*FMSIZE + ifmx;
if(ifmy >= 0 && ifmy < FMSIZE && ifmx >= 0 && ifmx < FMSIZE)
sum += filt[filtIdx] * inNeu[inNeuIdx];
//"filt" is a giant array that stores all of the parameters of all the filters
//size = 307200
//inNeu size = 69984
//What's tricky here is that filter weighting and input neurons are all int
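//Layout implied by the indexing below: filt[fn][sli][y][x] and
//inNeu[sli][row][col], both flattened row-major (the quoted sizes presumably
//equal FILTNUM*FMDEPTH*FILTSIZE*FILTSIZE and FMDEPTH*FMSIZE*FMSIZE)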
}
}
}
// Activation - ReLU <- Don't pronounce it wrong
outNeuIdx = fn*fmArea + fmy*FMSIZE + fmx;
if(sum <= 0)
outNeu[outNeuIdx] = 0;
else
outNeu[outNeuIdx] = sum;
}
}
}
// Max Pooling with Window Size 3x3 and stride 3
int max, tmpVal;
for(sli = 0; sli < FILTNUM; sli++){
for(fmy = 0; fmy < FMSIZE/3 ; fmy += 1){
for(fmx = 0; fmx < FMSIZE/3 ; fmx += 1){
outNeuIdx = sli*fmArea + fmy*3*FMSIZE + fmx*3;
max = outNeu[outNeuIdx];
for(y = 0; y < 3; y++){
for(x = 0; x < 3; x++){
ofmy = fmy*3 + y;
ofmx = fmx*3 + x;
outNeuIdx = sli*fmArea + ofmy*FMSIZE + ofmx;
tmpVal = outNeu[outNeuIdx];
if(tmpVal > max)
max = tmpVal;
}
}
outIdx = sli*outArea + fmy*FMSIZE/3 + fmx;
outCPU[outIdx] = max;
}
}
}
}
/*** Implement your CUDA Kernel here ***/
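// Thread mapping: one thread per (filter, output row, output column). Each
// thread accumulates its convolution sum over all input slices; the 3x3 /
// stride-3 max pooling is folded in by letting the nine threads of a pooling
// window update the shared output cell in turn, separated by __syncthreads()
// (which only orders threads of the same block).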
__global__
void convLayerGPU(int* filt_GPU, int* inNeu_GPU, int* out_GPU_kernel)
{
// declarations for bunch of indexing parameters
int fn, sli, fmy, fmx, y, x;
int ifmy, ifmx;
int filtIdx, inNeuIdx, outIdx;
int filtVol = FMDEPTH * FILTSIZE * FILTSIZE;
int fmArea = FMSIZE * FMSIZE;
int filtArea = FILTSIZE * FILTSIZE;
int outArea = FMSIZE/3 * FMSIZE/3;
int sum;
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < outArea)
out_GPU_kernel[i] = 0;
if(i < FILTNUM*FMSIZE*FMSIZE){
sum = 0;
fn = i/FMSIZE/FMSIZE;
for(sli = 0; sli < FMDEPTH; sli++){
for(y = 0; y < FILTSIZE; y++){
for(x = 0; x < FILTSIZE; x++){
fmy = (i/FMSIZE)%FMSIZE;
fmx = i%FMSIZE;
ifmy = fmy - FILTSIZE / 2 + y;
ifmx = fmx - FILTSIZE / 2 + x;
filtIdx = fn*filtVol + sli*filtArea + y*FILTSIZE + x;
inNeuIdx = sli*fmArea + ifmy*FMSIZE + ifmx;
if(ifmy >= 0 && ifmy < FMSIZE && ifmx >= 0 && ifmx < FMSIZE)
sum += filt_GPU[filtIdx] * inNeu_GPU[inNeuIdx];
}
}
}
__syncthreads();
for (y = 0; y < 3; y++){
for (x = 0; x < 3; x++) {
if (fmy % 3 == y && fmx % 3 == x){
outIdx = fn*fmArea + fmy/3*FMSIZE/3 + fmx/3;
int tmp = out_GPU_kernel[outIdx];
if (sum > tmp)
out_GPU_kernel[outIdx] = sum;
}
__syncthreads();
}
}
}
// __syncthreads();
// Max Pooling with Window Size 3x3 and stride 3
/* int max = 0;
if(i < FILTNUM * (FMSIZE/3) * (FMSIZE/3)){
int tmpVal;
sli = i/(FMSIZE/3*FMSIZE/3);
fmy = (i%(FMSIZE/3*FMSIZE/3))/(FMSIZE/3);
fmx = i%(FMSIZE/3);
outNeuIdx = sli*fmArea + fmy*3*FMSIZE + fmx*3;
for(y = 0; y < 3; y++){
for(x = 0; x < 3; x++){
ofmy = fmy*3 + y;
ofmx = fmx*3 + x;
outNeuIdx = sli*fmArea + ofmy*FMSIZE + ofmx;
tmpVal = out_Neu_kernel[outNeuIdx];
if(tmpVal > max)
max = tmpVal;
}
}
out_GPU_kernel[i] = max;
}
*/
}
/*** Implement your CUDA Kernel here ***/
int main()
{
//variables setting and loading input data
timespec time_begin, time_end;
int convLayerCPUExecTime, convLayerGPUExecTime;
init();
/******** Added ********/
int* filt_GPU;
int* inNeu_GPU;
int* out_GPU_kernel;
// int* out_Neu_kernel;
//Convolution by CPU
clock_gettime(CLOCK_REALTIME, &time_begin);
convLayerCPU();
clock_gettime(CLOCK_REALTIME, &time_end);
convLayerCPUExecTime = timespec_diff_us(time_begin, time_end);
cout << "CPU time for executing a typical convolutional layer = " << ((float)convLayerCPUExecTime)/1000 << "ms" << endl;
//Convolution by GPU
clock_gettime(CLOCK_REALTIME, &time_begin);
hipMalloc(&inNeu_GPU, FMSIZE*FMSIZE*FMDEPTH*sizeof(int));
hipMalloc(&filt_GPU, FILTSIZE*FILTSIZE*FMDEPTH*FILTNUM*sizeof(int));
hipMalloc(&out_GPU_kernel, FILTNUM * FMSIZE/3 * FMSIZE/3*sizeof(int));
hipMemcpy(filt_GPU, filt, FILTSIZE*FILTSIZE*FMDEPTH*FILTNUM*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(inNeu_GPU, inNeu, FMSIZE*FMSIZE*FMDEPTH*sizeof(int), hipMemcpyHostToDevice);
/******** Added ********/
/*** Launch your CUDA Kernel here ***/
hipLaunchKernelGGL(( convLayerGPU), dim3((FILTNUM*FMSIZE*FMSIZE+8191)/8192),dim3(8192), 0, 0, filt_GPU, inNeu_GPU, out_GPU_kernel); // Launch the kernel
hipDeviceSynchronize(); // Do synchronization before clock_gettime()
hipMemcpy(outGPU, out_GPU_kernel, FILTNUM * FMSIZE/3 * FMSIZE/3*sizeof(int), hipMemcpyDeviceToHost);
/*** Launch your CUDA Kernel here ***/
clock_gettime(CLOCK_REALTIME, &time_end);
convLayerGPUExecTime = timespec_diff_us(time_begin, time_end);
cout << "GPU time for executing a typical convolutional layer = " << ((float)convLayerGPUExecTime)/1000 << "ms" << endl;
//check the answer from CPU and from GPU
if(checker()){
cout << "Congratulations! You pass the check." << endl;
cout << "Speedup: " << (float)convLayerCPUExecTime / convLayerGPUExecTime << endl;
}
else
cout << "Hummm there's something wrong" << endl;
/******** Added ********/
hipFree(filt_GPU);
hipFree(inNeu_GPU);
hipFree(out_GPU_kernel);
/******** Added ********/
//release memory space
ending();
return 0;
}
| 19c10726bba9d47b7e91ac63bc97e5dd3267c598.cu | #include <iostream>
#include "CNNConvLayer.h"
using namespace std;
// This is the CPU version, please don't modify it
void convLayerCPU()
{
// declarations for bunch of indexing parameters
int fn, sli, fmy, fmx, y, x;
int ifmy, ifmx, ofmy, ofmx;
int filtIdx, inNeuIdx, outNeuIdx, outIdx;
int filtVol = FMDEPTH * FILTSIZE * FILTSIZE;
int fmArea = FMSIZE * FMSIZE;
int filtArea = FILTSIZE * FILTSIZE;
int outArea = FMSIZE/3 * FMSIZE/3;
int sum;
// Convolution
for(fn = 0; fn < FILTNUM; fn++){ //iterate through each filters
for(fmy = 0; fmy < FMSIZE; fmy += STRIDE){ //Stride through
for(fmx = 0; fmx < FMSIZE; fmx += STRIDE){ //Stride through
sum = 0;
for(sli = 0; sli < FMDEPTH; sli++){ //Iterate through depth
//Convolution
for(y = 0; y < FILTSIZE; y++){
for(x = 0; x < FILTSIZE; x++){
ifmy = fmy - FILTSIZE / 2 + y;
ifmx = fmx - FILTSIZE / 2 + x;
filtIdx = fn*filtVol + sli*filtArea + y*FILTSIZE + x;
inNeuIdx = sli*fmArea + ifmy*FMSIZE + ifmx;
if(ifmy >= 0 && ifmy < FMSIZE && ifmx >= 0 && ifmx < FMSIZE)
sum += filt[filtIdx] * inNeu[inNeuIdx];
//"filt" is a giant array that stores all of the parameters of all the filters
//size = 307200
//inNeu size = 69984
//What's tricky here is that filter weighting and input neurons are all int
}
}
}
// Activation - ReLU <- Don't pronounce it wrong
outNeuIdx = fn*fmArea + fmy*FMSIZE + fmx;
if(sum <= 0)
outNeu[outNeuIdx] = 0;
else
outNeu[outNeuIdx] = sum;
}
}
}
// Max Pooling with Window Size 3x3 and stride 3
int max, tmpVal;
for(sli = 0; sli < FILTNUM; sli++){
for(fmy = 0; fmy < FMSIZE/3 ; fmy += 1){
for(fmx = 0; fmx < FMSIZE/3 ; fmx += 1){
outNeuIdx = sli*fmArea + fmy*3*FMSIZE + fmx*3;
max = outNeu[outNeuIdx];
for(y = 0; y < 3; y++){
for(x = 0; x < 3; x++){
ofmy = fmy*3 + y;
ofmx = fmx*3 + x;
outNeuIdx = sli*fmArea + ofmy*FMSIZE + ofmx;
tmpVal = outNeu[outNeuIdx];
if(tmpVal > max)
max = tmpVal;
}
}
outIdx = sli*outArea + fmy*FMSIZE/3 + fmx;
outCPU[outIdx] = max;
}
}
}
}
/*** Implement your CUDA Kernel here ***/
__global__
void convLayerGPU(int* filt_GPU, int* inNeu_GPU, int* out_GPU_kernel)
{
// declarations for bunch of indexing parameters
int fn, sli, fmy, fmx, y, x;
int ifmy, ifmx;
int filtIdx, inNeuIdx, outIdx;
int filtVol = FMDEPTH * FILTSIZE * FILTSIZE;
int fmArea = FMSIZE * FMSIZE;
int filtArea = FILTSIZE * FILTSIZE;
int outArea = FMSIZE/3 * FMSIZE/3;
int sum;
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < outArea)
out_GPU_kernel[i] = 0;
if(i < FILTNUM*FMSIZE*FMSIZE){
sum = 0;
fn = i/FMSIZE/FMSIZE;
for(sli = 0; sli < FMDEPTH; sli++){
for(y = 0; y < FILTSIZE; y++){
for(x = 0; x < FILTSIZE; x++){
fmy = (i/FMSIZE)%FMSIZE;
fmx = i%FMSIZE;
ifmy = fmy - FILTSIZE / 2 + y;
ifmx = fmx - FILTSIZE / 2 + x;
filtIdx = fn*filtVol + sli*filtArea + y*FILTSIZE + x;
inNeuIdx = sli*fmArea + ifmy*FMSIZE + ifmx;
if(ifmy >= 0 && ifmy < FMSIZE && ifmx >= 0 && ifmx < FMSIZE)
sum += filt_GPU[filtIdx] * inNeu_GPU[inNeuIdx];
}
}
}
__syncthreads();
for (y = 0; y < 3; y++){
for (x = 0; x < 3; x++) {
if (fmy % 3 == y && fmx % 3 == x){
outIdx = fn*fmArea + fmy/3*FMSIZE/3 + fmx/3;
int tmp = out_GPU_kernel[outIdx];
if (sum > tmp)
out_GPU_kernel[outIdx] = sum;
}
__syncthreads();
}
}
}
// __syncthreads();
// Max Pooling with Window Size 3x3 and stride 3
/* int max = 0;
if(i < FILTNUM * (FMSIZE/3) * (FMSIZE/3)){
int tmpVal;
sli = i/(FMSIZE/3*FMSIZE/3);
fmy = (i%(FMSIZE/3*FMSIZE/3))/(FMSIZE/3);
fmx = i%(FMSIZE/3);
outNeuIdx = sli*fmArea + fmy*3*FMSIZE + fmx*3;
for(y = 0; y < 3; y++){
for(x = 0; x < 3; x++){
ofmy = fmy*3 + y;
ofmx = fmx*3 + x;
outNeuIdx = sli*fmArea + ofmy*FMSIZE + ofmx;
tmpVal = out_Neu_kernel[outNeuIdx];
if(tmpVal > max)
max = tmpVal;
}
}
out_GPU_kernel[i] = max;
}
*/
}
/*** Implement your CUDA Kernel here ***/
int main()
{
//variables setting and loading input data
timespec time_begin, time_end;
int convLayerCPUExecTime, convLayerGPUExecTime;
init();
/******** Added ********/
int* filt_GPU;
int* inNeu_GPU;
int* out_GPU_kernel;
// int* out_Neu_kernel;
//Convolution by CPU
clock_gettime(CLOCK_REALTIME, &time_begin);
convLayerCPU();
clock_gettime(CLOCK_REALTIME, &time_end);
convLayerCPUExecTime = timespec_diff_us(time_begin, time_end);
cout << "CPU time for executing a typical convolutional layer = " << ((float)convLayerCPUExecTime)/1000 << "ms" << endl;
//Convolution by GPU
clock_gettime(CLOCK_REALTIME, &time_begin);
cudaMalloc(&inNeu_GPU, FMSIZE*FMSIZE*FMDEPTH*sizeof(int));
cudaMalloc(&filt_GPU, FILTSIZE*FILTSIZE*FMDEPTH*FILTNUM*sizeof(int));
cudaMalloc(&out_GPU_kernel, FILTNUM * FMSIZE/3 * FMSIZE/3*sizeof(int));
cudaMemcpy(filt_GPU, filt, FILTSIZE*FILTSIZE*FMDEPTH*FILTNUM*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(inNeu_GPU, inNeu, FMSIZE*FMSIZE*FMDEPTH*sizeof(int), cudaMemcpyHostToDevice);
/******** Added ********/
/*** Launch your CUDA Kernel here ***/
convLayerGPU<<<(FILTNUM*FMSIZE*FMSIZE+8191)/8192,8192>>>(filt_GPU, inNeu_GPU, out_GPU_kernel); // Launch the kernel
cudaDeviceSynchronize(); // Do synchronization before clock_gettime()
cudaMemcpy(outGPU, out_GPU_kernel, FILTNUM * FMSIZE/3 * FMSIZE/3*sizeof(int), cudaMemcpyDeviceToHost);
/*** Launch your CUDA Kernel here ***/
clock_gettime(CLOCK_REALTIME, &time_end);
convLayerGPUExecTime = timespec_diff_us(time_begin, time_end);
cout << "GPU time for executing a typical convolutional layer = " << ((float)convLayerGPUExecTime)/1000 << "ms" << endl;
//check the answer from CPU and from GPU
if(checker()){
cout << "Congratulations! You pass the check." << endl;
cout << "Speedup: " << (float)convLayerCPUExecTime / convLayerGPUExecTime << endl;
}
else
cout << "Hummm there's something wrong" << endl;
/******** Added ********/
cudaFree(filt_GPU);
cudaFree(inNeu_GPU);
cudaFree(out_GPU_kernel);
/******** Added ********/
//release memory space
ending();
return 0;
}
|
66a7f1c0c639d2e24f2a7b2c105763d0af5f6d8b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "utils.h"
#include <float.h>
#include <limits.h>
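// ---------------------------------------------------------------------------
// Hedged reference sketch (not part of the original assignment / starter
// code): a serial CPU version of steps 2-4 described in the header, handy for
// checking the GPU results on small inputs. The name referenceCdfCPU and its
// parameter names are illustrative assumptions only.
static void referenceCdfCPU(const float* h_logLum, unsigned int* h_cdf,
                            size_t size, size_t numBins,
                            float minLum, float maxLum)
{
  const float range = maxLum - minLum;
  for (size_t b = 0; b < numBins; ++b) h_cdf[b] = 0;
  // histogram: bin = (lum - lumMin) / lumRange * numBins, clamped at the top
  for (size_t i = 0; i < size; ++i) {
    size_t bin = (size_t)((h_logLum[i] - minLum) / range * numBins);
    if (bin >= numBins) bin = numBins - 1;
    ++h_cdf[bin];
  }
  // exclusive scan (prefix sum) of the histogram
  unsigned int acc = 0;
  for (size_t b = 0; b < numBins; ++b) {
    const unsigned int v = h_cdf[b];
    h_cdf[b] = acc;
    acc += v;
  }
}
// ---------------------------------------------------------------------------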
__global__ void minmax_kernel(const float* const d_input,float* d_output, size_t size, bool minmax){
extern __shared__ float shard[];
int index = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
if(index < size){
shard[tid] = d_input[index];
}else{
if(minmax==0){
shard[tid] = FLT_MAX;
}else{
shard[tid] = -FLT_MAX;
}
}
// wait for data copy
__syncthreads();
if(index > size){
// printf("index > size \n");
return;
}
for(int s = blockDim.x / 2; s > 0; s/=2){
if(tid < s){
if(minmax == 0){
shard[tid] = min(shard[tid],shard[tid+s]);
}else{
shard[tid] = max(shard[tid],shard[tid+s]);
}
}
__syncthreads();
}
if(tid == 0){
d_output[blockIdx.x] = shard[0];
}
}
__global__ void histogram_kernel(unsigned int* d_bins,const float* const d_logLuminance, int numBins,float min_logLum, float max_logLum, size_t size){
int index = threadIdx.x + blockDim.x * blockIdx.x;
if(index >= size){
return;
}
int bin_index;
bin_index = (d_logLuminance[index] - min_logLum) / (max_logLum - min_logLum) * numBins;
atomicAdd(&d_bins[bin_index],1);
}
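// Hillis-Steele inclusive scan over d_bins, made exclusive by subtracting each
// element's own starting value at the end. __syncthreads() only synchronizes
// one block, so this is only safe while numBins fits in the single block it is
// launched with below (1024 threads).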
__global__ void scan_kernel(unsigned int *d_bins, int size){
int index = threadIdx.x + blockDim.x * blockIdx.x;
if(index >= size){
return;
}
int val = 0;
int self_val = d_bins[index];
for(int s = 1; s<=size; s *=2){
int pre = index - s;
if(pre >= 0)
val = d_bins[pre];
__syncthreads();
if(pre >= 0)
d_bins[index] += val;
__syncthreads();
}
d_bins[index] -= self_val;
}
float cal_size(int a, int b){
return (int) ceil((float)a/(float)b);
}
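// Host-side reduction driver: each launch shrinks the data by a factor of
// BLOCK_WIDTH; the loop repeats until fewer than BLOCK_WIDTH values remain and
// then copies the surviving partial result back. minmax == 0 selects the
// minimum, anything else the maximum.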
float minmax(const float* const d_input,const size_t size, bool minmax){
const int BLOCK_WIDTH = 32;
size_t cur_size = size;
float *d_cur_input;
checkCudaErrors(hipMalloc(&d_cur_input,sizeof(float)*size));
checkCudaErrors(hipMemcpy(d_cur_input, d_input, sizeof(float)*size, hipMemcpyDeviceToDevice));
float * d_cur_output;
const int shared_mem_size = BLOCK_WIDTH * sizeof(float);
while(1){
checkCudaErrors(hipMalloc(&d_cur_output,sizeof(float)*cal_size(cur_size,BLOCK_WIDTH)));
hipLaunchKernelGGL(( minmax_kernel), dim3(cal_size(cur_size,BLOCK_WIDTH)), dim3(BLOCK_WIDTH), shared_mem_size, 0,
d_cur_input,
d_cur_output,
cur_size,
minmax
);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(d_cur_input));
d_cur_input = d_cur_output;
if(cur_size < BLOCK_WIDTH)
break;
cur_size = cal_size(cur_size,BLOCK_WIDTH);
}
float h_output;
checkCudaErrors(hipMemcpy(&h_output, d_cur_output, sizeof(float), hipMemcpyDeviceToHost));
return h_output;
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
*/
const size_t size = numRows * numCols;
min_logLum = minmax(d_logLuminance, size, 0);
max_logLum = minmax(d_logLuminance, size, 1);
printf("got min of %f\n", min_logLum);
printf("got max of %f\n", max_logLum);
printf("numBins %d\n", numBins);
/*
2) subtract them to find the range
*/
/*
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
*/
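// Worked example with the numbers from the header (lumMin 0, lumRange 9,
// 3 bins): a value of 4 maps to bin floor(4/9*3) = 1, while the maximum value
// 9 would map to bin 3 -- one past the end -- which is why the top value needs
// clamping to numBins - 1.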
unsigned int* d_bins;
size_t histo_size = sizeof(unsigned int) * numBins;
checkCudaErrors(hipMalloc(&d_bins,histo_size));
checkCudaErrors(hipMemset(d_bins,0,histo_size));
dim3 thread_dim(1024);
dim3 hist_block_dim(cal_size(size, thread_dim.x));
hipLaunchKernelGGL(( histogram_kernel), dim3(hist_block_dim), dim3(thread_dim), 0, 0, d_bins, d_logLuminance, numBins, min_logLum, max_logLum, size);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
unsigned int h_out[100];
hipMemcpy(&h_out, d_bins, sizeof(unsigned int)*100, hipMemcpyDeviceToHost);
for(int i = 0; i < 10; i++)
printf("hist out %d\n", h_out[i]);
/*
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you)
*/
dim3 scan_block_dim(cal_size(numBins, thread_dim.x));
hipLaunchKernelGGL(( scan_kernel), dim3(scan_block_dim), dim3(thread_dim), 0, 0, d_bins, numBins);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipMemcpy(&h_out, d_bins, sizeof(unsigned int)*100, hipMemcpyDeviceToHost);
for(int i = 0; i < 10; i++)
printf("cdf out %d\n", h_out[i]);
hipMemcpy(d_cdf, d_bins, histo_size, hipMemcpyDeviceToDevice);
checkCudaErrors(hipFree(d_bins));
//
}
| 66a7f1c0c639d2e24f2a7b2c105763d0af5f6d8b.cu | /* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "utils.h"
#include <float.h>
#include <limits.h>
__global__ void minmax_kernel(const float* const d_input,float* d_output, size_t size, bool minmax){
extern __shared__ float shard[];
int index = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
if(index < size){
shard[tid] = d_input[index];
}else{
if(minmax==0){
shard[tid] = FLT_MAX;
}else{
shard[tid] = -FLT_MAX;
}
}
// wait for data copy
__syncthreads();
if(index > size){
// printf("index > size \n");
return;
}
for(int s = blockDim.x / 2; s > 0; s/=2){
if(tid < s){
if(minmax == 0){
shard[tid] = min(shard[tid],shard[tid+s]);
}else{
shard[tid] = max(shard[tid],shard[tid+s]);
}
}
__syncthreads();
}
if(tid == 0){
d_output[blockIdx.x] = shard[0];
}
}
__global__ void histogram_kernel(unsigned int* d_bins,const float* const d_logLuminance, int numBins,float min_logLum, float max_logLum, size_t size){
int index = threadIdx.x + blockDim.x * blockIdx.x;
if(index >= size){
return;
}
int bin_index = (d_logLuminance[index] - min_logLum) / (max_logLum - min_logLum) * numBins;
// clamp so that the maximum luminance value does not land one past the last bin
if(bin_index >= numBins) bin_index = numBins - 1;
atomicAdd(&d_bins[bin_index],1);
}
__global__ void scan_kernel(unsigned int *d_bins, int size){
int index = threadIdx.x + blockDim.x * blockIdx.x;
if(index >= size){
return;
}
int val = 0;
int self_val = d_bins[index];
for(int s = 1; s<=size; s *=2){
int pre = index - s;
if(pre >= 0)
val = d_bins[pre];
__syncthreads();
if(pre >= 0)
d_bins[index] += val;
__syncthreads();
}
d_bins[index] -= self_val;
}
int cal_size(int a, int b){
return (int) ceil((float)a/(float)b);
}
float minmax(const float* const d_input,const size_t size, bool minmax){
const int BLOCK_WIDTH = 32;
size_t cur_size = size;
float *d_cur_input;
checkCudaErrors(cudaMalloc(&d_cur_input,sizeof(float)*size));
checkCudaErrors(cudaMemcpy(d_cur_input, d_input, sizeof(float)*size, cudaMemcpyDeviceToDevice));
float * d_cur_output;
const int shared_mem_size = BLOCK_WIDTH * sizeof(float);
while(1){
checkCudaErrors(cudaMalloc(&d_cur_output,sizeof(float)*cal_size(cur_size,BLOCK_WIDTH)));
minmax_kernel<<<cal_size(cur_size,BLOCK_WIDTH), BLOCK_WIDTH, shared_mem_size>>>(
d_cur_input,
d_cur_output,
cur_size,
minmax
);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(d_cur_input));
d_cur_input = d_cur_output;
if(cur_size < BLOCK_WIDTH)
break;
cur_size = cal_size(cur_size,BLOCK_WIDTH);
}
float h_output;
checkCudaErrors(cudaMemcpy(&h_output, d_cur_output, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_cur_output)); // release the last intermediate buffer
return h_output;
}
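/*
   Illustrative trace of the sizes above: for size = 10000 the successive outputs
   hold ceil(10000/32) = 313 partial results, then ceil(313/32) = 10, then 1; the
   loop exits after the pass whose input count has already dropped below
   BLOCK_WIDTH, and the single remaining value is copied back to the host.
*/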
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
*/
const size_t size = numRows * numCols;
min_logLum = minmax(d_logLuminance, size, 0);
max_logLum = minmax(d_logLuminance, size, 1);
printf("got min of %f\n", min_logLum);
printf("got max of %f\n", max_logLum);
printf("numBins %d\n", numBins);
/*
2) subtract them to find the range
*/
/*
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
*/
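/*
   With the example from the file header (lumMin 0, lumMax 9, 3 bins): the value 2
   maps to 2 / 9 * 3 = 0.67, i.e. bin 0, and 7 maps to 7 / 9 * 3 = 2.33, i.e. bin 2;
   the maximum value 9 would map to bin 3, which is why histogram_kernel clamps the
   index to numBins - 1.
*/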
unsigned int* d_bins;
size_t histo_size = sizeof(unsigned int) * numBins;
checkCudaErrors(cudaMalloc(&d_bins,histo_size));
checkCudaErrors(cudaMemset(d_bins,0,histo_size));
dim3 thread_dim(1024);
dim3 hist_block_dim(cal_size(size, thread_dim.x));
histogram_kernel<<<hist_block_dim, thread_dim>>>(d_bins, d_logLuminance, numBins, min_logLum, max_logLum, size);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
unsigned int h_out[100];
cudaMemcpy(&h_out, d_bins, sizeof(unsigned int)*100, cudaMemcpyDeviceToHost);
for(int i = 0; i < 10; i++)
printf("hist out %d\n", h_out[i]);
/*
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you)
*/
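/*
   Note that the file header lists the inclusive scan [4 11 14] for the histogram
   [4 7 3]; the exclusive scan stored in d_cdf here is [0 4 11]. scan_kernel first
   builds the inclusive result and then subtracts each bin's own count (self_val)
   to shift it into exclusive form.
*/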
dim3 scan_block_dim(cal_size(numBins, thread_dim.x));
scan_kernel<<<scan_block_dim, thread_dim>>>(d_bins, numBins);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
cudaMemcpy(&h_out, d_bins, sizeof(unsigned int)*100, cudaMemcpyDeviceToHost);
for(int i = 0; i < 10; i++)
printf("cdf out %d\n", h_out[i]);
cudaMemcpy(d_cdf, d_bins, histo_size, cudaMemcpyDeviceToDevice);
checkCudaErrors(cudaFree(d_bins));
//
}
|
b7f64a676fe8a4704131586a50ce0f8175486b50.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <fstream>
#include <vector>
#include <string>
#include <sstream>
using namespace std;
#define gpuErrchk(ans){gpuAssert((ans), __FILE__, __LINE__);}
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if(code != hipSuccess)
{
fprintf(stderr, "GPUassert : %s %s %d\n", hipGetErrorString(code), file, line);
if(abort) exit(code);
}
}
__global__ void dipoleCorrelation(double *px, double *py, double *pz, double *corr, int N)
{
int tau = threadIdx.x + blockDim.x * blockIdx.x;
double local_corr = 0;
if(tau < N)
{
for(int index = 0; index < N - tau; ++index)
{
local_corr += px[index] * px[index + tau]
+ py[index] * py[index + tau]
+ pz[index] * pz[index + tau];
}
local_corr = local_corr/(N-tau);
corr[tau] = local_corr;
}
__syncthreads();
}
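/*
   Each thread evaluates the dipole autocorrelation at a single lag tau:
       C(tau) = (1/(N - tau)) * sum over t of p(t) . p(t + tau), t = 0 .. N-tau-1,
   where p(t) = (px, py, pz) is the dipole moment of sample t. The trailing
   __syncthreads() is not required for correctness, since every thread writes a
   distinct element of corr.
*/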
int main()
{
string data, line, word;
int pos(8);
vector< double > dipole_x, dipole_y, dipole_z;
vector< double > t;
const string fileName = "Platinum_nanosphere_run2.stat";
const string fileOut = "CorrfuncCuda.wcorr";
ifstream file;
//open file
file.open(fileName,ios::in);
if(!file)
{
cout<<"Error in opening file"<<endl;
return -1;
}
// loop on getline() itself so the final (possibly empty) read is not processed twice
while(getline(file, line))
{
int i = 0;
stringstream is(line);
while( is >> word )
{
if (word.compare("#") == 0 || word.compare("##") == 0 ) break;
if(i == 0) t.push_back(stod(word));
if(i == pos)
{
dipole_x.push_back(stod(word));
}
if(i == pos + 1)
{
dipole_y.push_back(stod(word));
}
if(i == pos + 2)
{
dipole_z.push_back(stod(word));
}
i++;
}
}
cout<<"Dipole vector list created"<<endl;
//vector<double> dipole_corr, corr_time;
// calculation of co-orelation function
ofstream outfile;
outfile.open(fileOut);
int N = dipole_x.size();
double *xcomp_dipole = &dipole_x[0]; //convert dipole_x vector to array
double *ycomp_dipole = &dipole_y[0];
double *zcomp_dipole = &dipole_z[0];
double *xcomp_dipole_d, *ycomp_dipole_d, *zcomp_dipole_d;
double *corr_h, *corr_d;
corr_h = (double*)malloc(N*sizeof(double));
double dt = t[1]-t[0];
cout<<"Finding the correlation funciton"<<endl;
gpuErrchk(hipMalloc((void**)&xcomp_dipole_d, N * sizeof(double)));
gpuErrchk(hipMalloc((void**)&ycomp_dipole_d, N * sizeof(double)));
gpuErrchk(hipMalloc((void**)&zcomp_dipole_d, N * sizeof(double)));
gpuErrchk(hipMalloc((void**)&corr_d, N * sizeof(double)));
/*
for(int index =0; index < N ; ++index)
{
printf("Index: %d Px: %e, Py: %e, Pz: %e\n",index,xcomp_dipole[index],ycomp_dipole[index],zcomp_dipole[index]);
printf("Index: %d Px: %e, Py: %e, Pz: %e\n",index,dipole_x[index],dipole_y[index],dipole_z[index]);
}
*/
gpuErrchk(hipMemcpy(xcomp_dipole_d, xcomp_dipole, N * sizeof(double), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(ycomp_dipole_d, ycomp_dipole, N * sizeof(double), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(zcomp_dipole_d, zcomp_dipole, N * sizeof(double), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(corr_d, corr_h, N * sizeof(double), hipMemcpyHostToDevice));
int number_of_blocks;
number_of_blocks = ( N/1024 ) + 1;
hipLaunchKernelGGL((
dipoleCorrelation), dim3(number_of_blocks),dim3(1024), 0, 0, xcomp_dipole_d, ycomp_dipole_d, zcomp_dipole_d, corr_d, N);
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipMemcpy(corr_h, corr_d, N * sizeof(double), hipMemcpyDeviceToHost));
outfile<<"## charge velocity autocorrelation function"<<endl;
outfile<<"# time(tau)\t wcorr"<<endl;
for(int count= 0; count < N ; ++count )
{
outfile << t[count] << "\t" << corr_h[count]<<endl;
// cout << t[count] << "\t" << corr_h[count]<<endl;
//dipole_corr.push_back(local_corr/(length - tau));
//corr_time.push_back(tau * dt);
}
outfile.close();
// corr_h came from malloc(), so it must be released with free(), not delete[]
free(corr_h);
corr_h = NULL;
gpuErrchk(hipFree(corr_d));
}
| b7f64a676fe8a4704131586a50ce0f8175486b50.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <fstream>
#include <vector>
#include <string>
#include <sstream>
using namespace std;
#define gpuErrchk(ans){gpuAssert((ans), __FILE__, __LINE__);}
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if(code != cudaSuccess)
{
fprintf(stderr, "GPUassert : %s %s %d\n", cudaGetErrorString(code), file, line);
if(abort) exit(code);
}
}
__global__ void dipoleCorrelation(double *px, double *py, double *pz, double *corr, int N)
{
int tau = threadIdx.x + blockDim.x * blockIdx.x;
double local_corr = 0;
if(tau < N)
{
for(int index = 0; index < N - tau; ++index)
{
local_corr += px[index] * px[index + tau]
+ py[index] * py[index + tau]
+ pz[index] * pz[index + tau];
}
local_corr = local_corr/(N-tau);
corr[tau] = local_corr;
}
__syncthreads();
}
int main()
{
string data, line, word;
int pos(8);
vector< double > dipole_x, dipole_y, dipole_z;
vector< double > t;
const string fileName = "Platinum_nanosphere_run2.stat";
const string fileOut = "CorrfuncCuda.wcorr";
ifstream file;
//open file
file.open(fileName,ios::in);
if(!file)
{
cout<<"Error in opening file"<<endl;
return -1;
}
// loop on getline() itself so the final (possibly empty) read is not processed twice
while(getline(file, line))
{
int i = 0;
stringstream is(line);
while( is >> word )
{
if (word.compare("#") == 0 || word.compare("##") == 0 ) break;
if(i == 0) t.push_back(stod(word));
if(i == pos)
{
dipole_x.push_back(stod(word));
}
if(i == pos + 1)
{
dipole_y.push_back(stod(word));
}
if(i == pos + 2)
{
dipole_z.push_back(stod(word));
}
i++;
}
}
cout<<"Dipole vector list created"<<endl;
//vector<double> dipole_corr, corr_time;
// calculation of co-orelation function
ofstream outfile;
outfile.open(fileOut);
int N = dipole_x.size();
double *xcomp_dipole = &dipole_x[0]; //convert dipole_x vector to array
double *ycomp_dipole = &dipole_y[0];
double *zcomp_dipole = &dipole_z[0];
double *xcomp_dipole_d, *ycomp_dipole_d, *zcomp_dipole_d;
double *corr_h, *corr_d;
corr_h = (double*)malloc(N*sizeof(double));
double dt = t[1]-t[0];
cout<<"Finding the correlation funciton"<<endl;
gpuErrchk(cudaMalloc((void**)&xcomp_dipole_d, N * sizeof(double)));
gpuErrchk(cudaMalloc((void**)&ycomp_dipole_d, N * sizeof(double)));
gpuErrchk(cudaMalloc((void**)&zcomp_dipole_d, N * sizeof(double)));
gpuErrchk(cudaMalloc((void**)&corr_d, N * sizeof(double)));
/*
for(int index =0; index < N ; ++index)
{
printf("Index: %d Px: %e, Py: %e, Pz: %e\n",index,xcomp_dipole[index],ycomp_dipole[index],zcomp_dipole[index]);
printf("Index: %d Px: %e, Py: %e, Pz: %e\n",index,dipole_x[index],dipole_y[index],dipole_z[index]);
}
*/
gpuErrchk(cudaMemcpy(xcomp_dipole_d, xcomp_dipole, N * sizeof(double), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(ycomp_dipole_d, ycomp_dipole, N * sizeof(double), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(zcomp_dipole_d, zcomp_dipole, N * sizeof(double), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(corr_d, corr_h, N * sizeof(double), cudaMemcpyHostToDevice));
int number_of_blocks;
number_of_blocks = ( N/1024 ) + 1;
dipoleCorrelation<<<number_of_blocks,1024>>> (xcomp_dipole_d, ycomp_dipole_d, zcomp_dipole_d, corr_d, N);
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpy(corr_h, corr_d, N * sizeof(double), cudaMemcpyDeviceToHost));
outfile<<"## charge velocity autocorrelation function"<<endl;
outfile<<"# time(tau)\t wcorr"<<endl;
for(int count= 0; count < N ; ++count )
{
outfile << t[count] << "\t" << corr_h[count]<<endl;
// cout << t[count] << "\t" << corr_h[count]<<endl;
//dipole_corr.push_back(local_corr/(length - tau));
//corr_time.push_back(tau * dt);
}
outfile.close();
// corr_h came from malloc(), so it must be released with free(), not delete[]
free(corr_h);
corr_h = NULL;
gpuErrchk(cudaFree(corr_d));
}
|
70765769cdef20968e51efdabeb5ce903e37eabc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2014 Nervana Systems Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
extern "C"
__global__ void __launch_bounds__(256) hsgemm_nt_vec_128x128
(
short* param_Rand,
const short* param_A,
const float* param_B,
short* param_C,
int param_lda,
int param_ldb,
int param_ldc,
int param_m,
int param_n,
int param_k,
float param_alpha,
float param_beta,
int param_flags
)
{
__shared__ float share[128 * 8 * 4 + 4];
int tid = threadIdx.x;
share[tid] = 1;
param_C[tid] = share[255-tid];
}
| 70765769cdef20968e51efdabeb5ce903e37eabc.cu | /*
* Copyright 2014 Nervana Systems Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
extern "C"
__global__ void __launch_bounds__(256) hsgemm_nt_vec_128x128
(
short* param_Rand,
const short* param_A,
const float* param_B,
short* param_C,
int param_lda,
int param_ldb,
int param_ldc,
int param_m,
int param_n,
int param_k,
float param_alpha,
float param_beta,
int param_flags
)
{
__shared__ float share[128 * 8 * 4 + 4];
int tid = threadIdx.x;
share[tid] = 1;
param_C[tid] = share[255-tid];
}
|
915e4f9b3daf014a8d467e215209f41eafd36fcc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Intel Corporation: 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cfloat>
#include <vector>
#include "caffe/layers/eltwise_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a,
const Dtype* bottom_data_b, const int blob_idx,
Dtype* top_data, int* mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype maxval = -FLT_MAX;
int maxidx = -1;
if (bottom_data_a[index] > bottom_data_b[index]) {
// only update for very first bottom_data blob (blob_idx == 0)
if (blob_idx == 0) {
maxval = bottom_data_a[index];
top_data[index] = maxval;
maxidx = blob_idx;
mask[index] = maxidx;
}
} else {
maxval = bottom_data_b[index];
top_data[index] = maxval;
maxidx = blob_idx + 1;
mask[index] = maxidx;
}
}
}
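// MaxForward is applied pairwise: the first launch compares bottoms 0 and 1 with
// blob_idx == 0, and each later launch folds the next bottom into the running
// maximum held in top_data, so mask ends up recording, for every element, the
// index of the bottom blob that supplied the maximum.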
template <typename Dtype>
void EltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int* mask = NULL;
const int count = top[0]->count();
Dtype* top_data = top[0]->mutable_gpu_data();
switch (op_) {
case EltwiseParameter_EltwiseOp_PROD:
caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
top_data);
for (int i = 2; i < bottom.size(); ++i) {
caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data);
}
break;
case EltwiseParameter_EltwiseOp_SUM:
// caffe_gpu_set(count, Dtype(0.), top_data);
if (bottom[0]->gpu_data() != top_data) {
caffe_copy(count, bottom[0]->gpu_data(), top_data);
}
caffe_gpu_scal(count, coeffs_[0], top_data);
// TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1?
for (int i = 1; i < bottom.size(); ++i) {
assert(bottom[i]->gpu_data() != top_data);
caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data);
}
break;
case EltwiseParameter_EltwiseOp_MAX:
mask = max_idx_.mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data,
mask);
for (int i = 2; i < bottom.size(); ++i) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_data, bottom[i]->gpu_data(), i - 1, top_data, mask);
}
break;
default:
LOG(FATAL) << "Unknown elementwise operation.";
}
}
template <typename Dtype>
__global__ void MaxBackward(const int nthreads, const Dtype* top_diff,
const int blob_idx, const int* mask,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype gradient = 0;
if (mask[index] == blob_idx) {
gradient += top_diff[index];
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void EltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const int* mask = NULL;
const int count = top[0]->count();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
for (int i = 0; i < bottom.size(); ++i) {
if (propagate_down[i]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
switch (op_) {
case EltwiseParameter_EltwiseOp_PROD:
if (stable_prod_grad_) {
bool initialized = false;
for (int j = 0; j < bottom.size(); ++j) {
if (i == j) {
continue;
}
if (!initialized) {
caffe_copy(count, bottom[j]->gpu_data(), bottom_diff);
initialized = true;
} else {
caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff,
bottom_diff);
}
}
} else {
caffe_gpu_div(count, top_data, bottom_data, bottom_diff);
}
caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff);
break;
case EltwiseParameter_EltwiseOp_SUM:
if (coeffs_[i] == Dtype(1.)) {
caffe_copy(count, top_diff, bottom_diff);
} else {
caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff);
}
break;
case EltwiseParameter_EltwiseOp_MAX:
mask = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, i, mask, bottom_diff);
break;
default:
LOG(FATAL) << "Unknown elementwise operation.";
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(EltwiseLayer);
} // namespace caffe
| 915e4f9b3daf014a8d467e215209f41eafd36fcc.cu | /*
All modification made by Intel Corporation: © 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cfloat>
#include <vector>
#include "caffe/layers/eltwise_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a,
const Dtype* bottom_data_b, const int blob_idx,
Dtype* top_data, int* mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype maxval = -FLT_MAX;
int maxidx = -1;
if (bottom_data_a[index] > bottom_data_b[index]) {
// only update for very first bottom_data blob (blob_idx == 0)
if (blob_idx == 0) {
maxval = bottom_data_a[index];
top_data[index] = maxval;
maxidx = blob_idx;
mask[index] = maxidx;
}
} else {
maxval = bottom_data_b[index];
top_data[index] = maxval;
maxidx = blob_idx + 1;
mask[index] = maxidx;
}
}
}
template <typename Dtype>
void EltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int* mask = NULL;
const int count = top[0]->count();
Dtype* top_data = top[0]->mutable_gpu_data();
switch (op_) {
case EltwiseParameter_EltwiseOp_PROD:
caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
top_data);
for (int i = 2; i < bottom.size(); ++i) {
caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data);
}
break;
case EltwiseParameter_EltwiseOp_SUM:
// caffe_gpu_set(count, Dtype(0.), top_data);
if (bottom[0]->gpu_data() != top_data) {
caffe_copy(count, bottom[0]->gpu_data(), top_data);
}
caffe_gpu_scal(count, coeffs_[0], top_data);
// TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1?
for (int i = 1; i < bottom.size(); ++i) {
assert(bottom[i]->gpu_data() != top_data);
caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data);
}
break;
case EltwiseParameter_EltwiseOp_MAX:
mask = max_idx_.mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
MaxForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data,
mask);
for (int i = 2; i < bottom.size(); ++i) {
// NOLINT_NEXT_LINE(whitespace/operators)
MaxForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_data, bottom[i]->gpu_data(), i - 1, top_data, mask);
}
break;
default:
LOG(FATAL) << "Unknown elementwise operation.";
}
}
template <typename Dtype>
__global__ void MaxBackward(const int nthreads, const Dtype* top_diff,
const int blob_idx, const int* mask,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype gradient = 0;
if (mask[index] == blob_idx) {
gradient += top_diff[index];
}
bottom_diff[index] = gradient;
}
}
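// For each element, MaxBackward routes the top gradient to the single bottom blob
// recorded in mask and writes zero to every other bottom, analogous to the
// backward pass of max pooling.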
template <typename Dtype>
void EltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const int* mask = NULL;
const int count = top[0]->count();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
for (int i = 0; i < bottom.size(); ++i) {
if (propagate_down[i]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
switch (op_) {
case EltwiseParameter_EltwiseOp_PROD:
if (stable_prod_grad_) {
bool initialized = false;
for (int j = 0; j < bottom.size(); ++j) {
if (i == j) {
continue;
}
if (!initialized) {
caffe_copy(count, bottom[j]->gpu_data(), bottom_diff);
initialized = true;
} else {
caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff,
bottom_diff);
}
}
} else {
caffe_gpu_div(count, top_data, bottom_data, bottom_diff);
}
caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff);
break;
case EltwiseParameter_EltwiseOp_SUM:
if (coeffs_[i] == Dtype(1.)) {
caffe_copy(count, top_diff, bottom_diff);
} else {
caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff);
}
break;
case EltwiseParameter_EltwiseOp_MAX:
mask = max_idx_.gpu_data();
MaxBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, i, mask, bottom_diff);
break;
default:
LOG(FATAL) << "Unknown elementwise operation.";
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(EltwiseLayer);
} // namespace caffe
|
17f3c38cc1e295a79039bc3f0643ec5078591af8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file mpcd/CellListGPU.cu
* \brief Defines GPU functions and kernels used by mpcd::CellListGPU
*/
#include "CellListGPU.cuh"
namespace mpcd
{
namespace gpu
{
namespace kernel
{
//! Kernel to compute the MPCD cell list on the GPU
/*!
* \param d_cell_np Array of number of particles per cell
* \param d_cell_list 2D array of MPCD particles in each cell
* \param d_conditions Conditions flags for error reporting
* \param d_vel MPCD particle velocities
* \param d_embed_cell_ids Cell indexes of embedded particles
* \param d_pos MPCD particle positions
* \param d_pos_embed Particle positions
* \param d_embed_member_idx Indexes of embedded particles in \a d_pos_embed
* \param periodic Flags if local simulation is periodic
* \param origin_idx Global origin index for the local box
* \param grid_shift Random grid shift vector
* \param global_lo Lower bound of global orthorhombic simulation box
* \param n_global_cell Global dimensions of the cell list, including padding
* \param cell_size Cell width
* \param cell_np_max Maximum number of particles per cell
* \param cell_indexer 3D indexer for cell id
* \param cell_list_indexer 2D indexer for particle position in cell
* \param N_mpcd Number of MPCD particles
* \param N_tot Total number of particle (MPCD + embedded)
*
* \b Implementation
* One thread is launched per particle. The particle is floored into a bin subject to a random grid
* shift. The number of particles in that bin is atomically incremented. If the addition of the
* particle will not overflow the allocated memory, the particle is written into that bin.
* Otherwise, a flag is set to resize the cell list and recompute. The MPCD particle's cell id is
* stashed into the velocity array.
*/
__global__ void compute_cell_list(unsigned int* d_cell_np,
unsigned int* d_cell_list,
uint3* d_conditions,
Scalar4* d_vel,
unsigned int* d_embed_cell_ids,
const Scalar4* d_pos,
const Scalar4* d_pos_embed,
const unsigned int* d_embed_member_idx,
const uchar3 periodic,
const int3 origin_idx,
const Scalar3 grid_shift,
const Scalar3 global_lo,
const uint3 n_global_cell,
const Scalar cell_size,
const unsigned int cell_np_max,
const Index3D cell_indexer,
const Index2D cell_list_indexer,
const unsigned int N_mpcd,
const unsigned int N_tot)
{
// one thread per particle
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N_tot)
return;
Scalar4 postype_i;
if (idx < N_mpcd)
{
postype_i = d_pos[idx];
}
else
{
postype_i = d_pos_embed[d_embed_member_idx[idx - N_mpcd]];
}
const Scalar3 pos_i = make_scalar3(postype_i.x, postype_i.y, postype_i.z);
if (isnan(pos_i.x) || isnan(pos_i.y) || isnan(pos_i.z))
{
(*d_conditions).y = idx + 1;
return;
}
// bin particle with grid shift assuming orthorhombic box (already validated)
const Scalar3 delta = (pos_i - grid_shift) - global_lo;
int3 global_bin = make_int3(::floor(delta.x / cell_size),
::floor(delta.y / cell_size),
::floor(delta.z / cell_size));
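// Illustrative example (numbers invented for clarity): with cell_size = 1.0,
// global_lo = (-5,-5,-5) and grid_shift = (0.4,0,0), a particle at x = -4.7 has
// delta.x = (-4.7 - 0.4) - (-5) = -0.1, which floors to global_bin.x = -1 and is
// wrapped back to n_global_cell.x - 1 below when that direction is periodic.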
// wrap cell back through the boundaries (grid shifting may send +/- 1 outside of range)
// this is done using periodic from the "local" box, since this will be periodic
// only when there is one rank along the dimension
if (periodic.x)
{
if (global_bin.x == (int)n_global_cell.x)
global_bin.x = 0;
else if (global_bin.x == -1)
global_bin.x = n_global_cell.x - 1;
}
if (periodic.y)
{
if (global_bin.y == (int)n_global_cell.y)
global_bin.y = 0;
else if (global_bin.y == -1)
global_bin.y = n_global_cell.y - 1;
}
if (periodic.z)
{
if (global_bin.z == (int)n_global_cell.z)
global_bin.z = 0;
else if (global_bin.z == -1)
global_bin.z = n_global_cell.z - 1;
}
// compute the local cell
int3 bin = make_int3(global_bin.x - origin_idx.x,
global_bin.y - origin_idx.y,
global_bin.z - origin_idx.z);
// validate and make sure no particles blew out of the box
if ((bin.x < 0 || bin.x >= (int)cell_indexer.getW())
|| (bin.y < 0 || bin.y >= (int)cell_indexer.getH())
|| (bin.z < 0 || bin.z >= (int)cell_indexer.getD()))
{
(*d_conditions).z = idx + 1;
return;
}
const unsigned int bin_idx = cell_indexer(bin.x, bin.y, bin.z);
const unsigned int offset = atomicInc(&d_cell_np[bin_idx], 0xffffffff);
if (offset < cell_np_max)
{
d_cell_list[cell_list_indexer(offset, bin_idx)] = idx;
}
else
{
// overflow
atomicMax(&(*d_conditions).x, offset + 1);
}
// stash the current particle bin into the velocity array
if (idx < N_mpcd)
{
d_vel[idx].w = __int_as_scalar(bin_idx);
}
else
{
d_embed_cell_ids[idx - N_mpcd] = bin_idx;
}
}
/*!
* \param d_migrate_flag Flag signaling migration is required (output)
* \param d_pos Embedded particle positions
* \param d_group Indexes into \a d_pos for particles in embedded group
* \param box Box covered by this domain
* \param num_dim Dimensionality of system
* \param N Number of particles in group
*
* \b Implementation
* Using one thread per particle, each particle position is compared to the
* bounds of the simulation box. If a particle lies outside the box, \a d_migrate_flag
* has its bits set using an atomicMax transaction. The caller should then trigger
* a communication step to migrate particles to their appropriate ranks.
*/
__global__ void cell_check_migrate_embed(unsigned int* d_migrate_flag,
const Scalar4* d_pos,
const unsigned int* d_group,
const BoxDim box,
const unsigned int num_dim,
const unsigned int N)
{
// one thread per particle in group
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= N)
return;
const unsigned int idx = d_group[tid];
const Scalar4 postype = d_pos[idx];
const Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
const Scalar3 lo = box.getLo();
const Scalar3 hi = box.getHi();
const uchar3 periodic = box.getPeriodic();
if ((!periodic.x && (pos.x >= hi.x || pos.x < lo.x))
|| (!periodic.y && (pos.y >= hi.y || pos.y < lo.y))
|| (!periodic.z && num_dim == 3 && (pos.z >= hi.z || pos.z < lo.z)))
{
atomicMax(d_migrate_flag, 1);
}
}
__global__ void cell_apply_sort(unsigned int* d_cell_list,
const unsigned int* d_rorder,
const unsigned int* d_cell_np,
const Index2D cli,
const unsigned int N_mpcd,
const unsigned int N_cli)
{
// one thread per cell-list entry
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N_cli)
return;
// convert the entry 1D index into a 2D index
const unsigned int cell = idx / cli.getW();
const unsigned int offset = idx - (cell * cli.getW());
/* here comes some terrible execution divergence */
// check if the cell is filled
const unsigned int np = d_cell_np[cell];
if (offset < np)
{
// check if this is an MPCD particle
const unsigned int pid = d_cell_list[idx];
if (pid < N_mpcd)
{
d_cell_list[idx] = d_rorder[pid];
}
}
}
} // end namespace kernel
} // end namespace gpu
} // end namespace mpcd
/*!
* \param d_cell_np Array of number of particles per cell
* \param d_cell_list 2D array of MPCD particles in each cell
* \param d_conditions Conditions flags for error reporting
* \param d_vel MPCD particle velocities
* \param d_embed_cell_ids Cell indexes of embedded particles
* \param d_pos MPCD particle positions
* \param d_pos_embed Particle positions
* \param d_embed_member_idx Indexes of embedded particles in \a d_pos_embed
* \param periodic Flags if local simulation is periodic
* \param origin_idx Global origin index for the local box
* \param grid_shift Random grid shift vector
* \param global_lo Lower bound of global orthorhombic simulation box
* \param n_global_cell Global dimensions of the cell list, including padding
* \param cell_size Cell width
* \param cell_np_max Maximum number of particles per cell
* \param cell_indexer 3D indexer for cell id
* \param cell_list_indexer 2D indexer for particle position in cell
* \param N_mpcd Number of MPCD particles
* \param N_tot Total number of particle (MPCD + embedded)
* \param block_size Number of threads per block
*
* \returns hipSuccess on completion, or an error on failure
*/
hipError_t mpcd::gpu::compute_cell_list(unsigned int* d_cell_np,
unsigned int* d_cell_list,
uint3* d_conditions,
Scalar4* d_vel,
unsigned int* d_embed_cell_ids,
const Scalar4* d_pos,
const Scalar4* d_pos_embed,
const unsigned int* d_embed_member_idx,
const uchar3& periodic,
const int3& origin_idx,
const Scalar3& grid_shift,
const Scalar3& global_lo,
const uint3& n_global_cell,
const Scalar cell_size,
const unsigned int cell_np_max,
const Index3D& cell_indexer,
const Index2D& cell_list_indexer,
const unsigned int N_mpcd,
const unsigned int N_tot,
const unsigned int block_size)
{
// set the number of particles in each cell to zero
hipError_t error
= hipMemset(d_cell_np, 0, sizeof(unsigned int) * cell_indexer.getNumElements());
if (error != hipSuccess)
return error;
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::compute_cell_list);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N_tot / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::compute_cell_list), dim3(grid), dim3(run_block_size), 0, 0, d_cell_np,
d_cell_list,
d_conditions,
d_vel,
d_embed_cell_ids,
d_pos,
d_pos_embed,
d_embed_member_idx,
periodic,
origin_idx,
grid_shift,
global_lo,
n_global_cell,
cell_size,
cell_np_max,
cell_indexer,
cell_list_indexer,
N_mpcd,
N_tot);
return hipSuccess;
}
/*!
* \param d_migrate_flag Flag signaling migration is required (output)
* \param d_pos Embedded particle positions
* \param d_group Indexes into \a d_pos for particles in embedded group
* \param box Box covered by this domain
* \param N Number of particles in group
* \param block_size Number of threads per block
*
* \sa mpcd::gpu::kernel::cell_check_migrate_embed
*/
hipError_t mpcd::gpu::cell_check_migrate_embed(unsigned int* d_migrate_flag,
const Scalar4* d_pos,
const unsigned int* d_group,
const BoxDim& box,
const unsigned int num_dim,
const unsigned int N,
const unsigned int block_size)
{
// ensure that the flag is always zeroed even if the caller forgets
hipMemset(d_migrate_flag, 0, sizeof(unsigned int));
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::cell_check_migrate_embed);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::cell_check_migrate_embed), dim3(grid), dim3(run_block_size), 0, 0, d_migrate_flag,
d_pos,
d_group,
box,
num_dim,
N);
return hipSuccess;
}
hipError_t mpcd::gpu::cell_apply_sort(unsigned int* d_cell_list,
const unsigned int* d_rorder,
const unsigned int* d_cell_np,
const Index2D& cli,
const unsigned int N_mpcd,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::cell_apply_sort);
max_block_size = attr.maxThreadsPerBlock;
}
const unsigned int N_cli = cli.getNumElements();
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N_cli / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::cell_apply_sort), dim3(grid), dim3(run_block_size), 0, 0, d_cell_list,
d_rorder,
d_cell_np,
cli,
N_mpcd,
N_cli);
return hipSuccess;
}
| 17f3c38cc1e295a79039bc3f0643ec5078591af8.cu | // Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file mpcd/CellListGPU.cu
* \brief Defines GPU functions and kernels used by mpcd::CellListGPU
*/
#include "CellListGPU.cuh"
namespace mpcd
{
namespace gpu
{
namespace kernel
{
//! Kernel to compute the MPCD cell list on the GPU
/*!
* \param d_cell_np Array of number of particles per cell
* \param d_cell_list 2D array of MPCD particles in each cell
* \param d_conditions Conditions flags for error reporting
* \param d_vel MPCD particle velocities
* \param d_embed_cell_ids Cell indexes of embedded particles
* \param d_pos MPCD particle positions
* \param d_pos_embed Particle positions
* \param d_embed_member_idx Indexes of embedded particles in \a d_pos_embed
* \param periodic Flags if local simulation is periodic
* \param origin_idx Global origin index for the local box
* \param grid_shift Random grid shift vector
* \param global_lo Lower bound of global orthorhombic simulation box
* \param n_global_cell Global dimensions of the cell list, including padding
* \param cell_size Cell width
* \param cell_np_max Maximum number of particles per cell
* \param cell_indexer 3D indexer for cell id
* \param cell_list_indexer 2D indexer for particle position in cell
* \param N_mpcd Number of MPCD particles
* \param N_tot Total number of particle (MPCD + embedded)
*
* \b Implementation
* One thread is launched per particle. The particle is floored into a bin subject to a random grid
* shift. The number of particles in that bin is atomically incremented. If the addition of the
* particle will not overflow the allocated memory, the particle is written into that bin.
* Otherwise, a flag is set to resize the cell list and recompute. The MPCD particle's cell id is
* stashed into the velocity array.
*/
__global__ void compute_cell_list(unsigned int* d_cell_np,
unsigned int* d_cell_list,
uint3* d_conditions,
Scalar4* d_vel,
unsigned int* d_embed_cell_ids,
const Scalar4* d_pos,
const Scalar4* d_pos_embed,
const unsigned int* d_embed_member_idx,
const uchar3 periodic,
const int3 origin_idx,
const Scalar3 grid_shift,
const Scalar3 global_lo,
const uint3 n_global_cell,
const Scalar cell_size,
const unsigned int cell_np_max,
const Index3D cell_indexer,
const Index2D cell_list_indexer,
const unsigned int N_mpcd,
const unsigned int N_tot)
{
// one thread per particle
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N_tot)
return;
Scalar4 postype_i;
if (idx < N_mpcd)
{
postype_i = d_pos[idx];
}
else
{
postype_i = d_pos_embed[d_embed_member_idx[idx - N_mpcd]];
}
const Scalar3 pos_i = make_scalar3(postype_i.x, postype_i.y, postype_i.z);
if (isnan(pos_i.x) || isnan(pos_i.y) || isnan(pos_i.z))
{
(*d_conditions).y = idx + 1;
return;
}
// bin particle with grid shift assuming orthorhombic box (already validated)
const Scalar3 delta = (pos_i - grid_shift) - global_lo;
int3 global_bin = make_int3(std::floor(delta.x / cell_size),
std::floor(delta.y / cell_size),
std::floor(delta.z / cell_size));
// wrap cell back through the boundaries (grid shifting may send +/- 1 outside of range)
// this is done using periodic from the "local" box, since this will be periodic
// only when there is one rank along the dimension
if (periodic.x)
{
if (global_bin.x == (int)n_global_cell.x)
global_bin.x = 0;
else if (global_bin.x == -1)
global_bin.x = n_global_cell.x - 1;
}
if (periodic.y)
{
if (global_bin.y == (int)n_global_cell.y)
global_bin.y = 0;
else if (global_bin.y == -1)
global_bin.y = n_global_cell.y - 1;
}
if (periodic.z)
{
if (global_bin.z == (int)n_global_cell.z)
global_bin.z = 0;
else if (global_bin.z == -1)
global_bin.z = n_global_cell.z - 1;
}
// compute the local cell
int3 bin = make_int3(global_bin.x - origin_idx.x,
global_bin.y - origin_idx.y,
global_bin.z - origin_idx.z);
// validate and make sure no particles blew out of the box
if ((bin.x < 0 || bin.x >= (int)cell_indexer.getW())
|| (bin.y < 0 || bin.y >= (int)cell_indexer.getH())
|| (bin.z < 0 || bin.z >= (int)cell_indexer.getD()))
{
(*d_conditions).z = idx + 1;
return;
}
const unsigned int bin_idx = cell_indexer(bin.x, bin.y, bin.z);
const unsigned int offset = atomicInc(&d_cell_np[bin_idx], 0xffffffff);
if (offset < cell_np_max)
{
d_cell_list[cell_list_indexer(offset, bin_idx)] = idx;
}
else
{
// overflow
atomicMax(&(*d_conditions).x, offset + 1);
}
// stash the current particle bin into the velocity array
if (idx < N_mpcd)
{
d_vel[idx].w = __int_as_scalar(bin_idx);
}
else
{
d_embed_cell_ids[idx - N_mpcd] = bin_idx;
}
}
/*!
* \param d_migrate_flag Flag signaling migration is required (output)
* \param d_pos Embedded particle positions
* \param d_group Indexes into \a d_pos for particles in embedded group
* \param box Box covered by this domain
* \param num_dim Dimensionality of system
* \param N Number of particles in group
*
* \b Implementation
* Using one thread per particle, each particle position is compared to the
* bounds of the simulation box. If a particle lies outside the box, \a d_migrate_flag
* has its bits set using an atomicMax transaction. The caller should then trigger
* a communication step to migrate particles to their appropriate ranks.
*/
__global__ void cell_check_migrate_embed(unsigned int* d_migrate_flag,
const Scalar4* d_pos,
const unsigned int* d_group,
const BoxDim box,
const unsigned int num_dim,
const unsigned int N)
{
// one thread per particle in group
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= N)
return;
const unsigned int idx = d_group[tid];
const Scalar4 postype = d_pos[idx];
const Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
const Scalar3 lo = box.getLo();
const Scalar3 hi = box.getHi();
const uchar3 periodic = box.getPeriodic();
if ((!periodic.x && (pos.x >= hi.x || pos.x < lo.x))
|| (!periodic.y && (pos.y >= hi.y || pos.y < lo.y))
|| (!periodic.z && num_dim == 3 && (pos.z >= hi.z || pos.z < lo.z)))
{
atomicMax(d_migrate_flag, 1);
}
}
__global__ void cell_apply_sort(unsigned int* d_cell_list,
const unsigned int* d_rorder,
const unsigned int* d_cell_np,
const Index2D cli,
const unsigned int N_mpcd,
const unsigned int N_cli)
{
// one thread per cell-list entry
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N_cli)
return;
// convert the entry 1D index into a 2D index
const unsigned int cell = idx / cli.getW();
const unsigned int offset = idx - (cell * cli.getW());
/* here comes some terrible execution divergence */
// check if the cell is filled
const unsigned int np = d_cell_np[cell];
if (offset < np)
{
// check if this is an MPCD particle
const unsigned int pid = d_cell_list[idx];
if (pid < N_mpcd)
{
d_cell_list[idx] = d_rorder[pid];
}
}
}
} // end namespace kernel
} // end namespace gpu
} // end namespace mpcd
/*!
* \param d_cell_np Array of number of particles per cell
* \param d_cell_list 2D array of MPCD particles in each cell
* \param d_conditions Conditions flags for error reporting
* \param d_vel MPCD particle velocities
* \param d_embed_cell_ids Cell indexes of embedded particles
* \param d_pos MPCD particle positions
* \param d_pos_embed Particle positions
* \param d_embed_member_idx Indexes of embedded particles in \a d_pos_embed
* \param periodic Flags if local simulation is periodic
* \param origin_idx Global origin index for the local box
* \param grid_shift Random grid shift vector
* \param global_lo Lower bound of global orthorhombic simulation box
* \param n_global_cell Global dimensions of the cell list, including padding
* \param cell_size Cell width
* \param cell_np_max Maximum number of particles per cell
* \param cell_indexer 3D indexer for cell id
* \param cell_list_indexer 2D indexer for particle position in cell
* \param N_mpcd Number of MPCD particles
* \param N_tot Total number of particle (MPCD + embedded)
* \param block_size Number of threads per block
*
* \returns cudaSuccess on completion, or an error on failure
*/
cudaError_t mpcd::gpu::compute_cell_list(unsigned int* d_cell_np,
unsigned int* d_cell_list,
uint3* d_conditions,
Scalar4* d_vel,
unsigned int* d_embed_cell_ids,
const Scalar4* d_pos,
const Scalar4* d_pos_embed,
const unsigned int* d_embed_member_idx,
const uchar3& periodic,
const int3& origin_idx,
const Scalar3& grid_shift,
const Scalar3& global_lo,
const uint3& n_global_cell,
const Scalar cell_size,
const unsigned int cell_np_max,
const Index3D& cell_indexer,
const Index2D& cell_list_indexer,
const unsigned int N_mpcd,
const unsigned int N_tot,
const unsigned int block_size)
{
// set the number of particles in each cell to zero
cudaError_t error
= cudaMemset(d_cell_np, 0, sizeof(unsigned int) * cell_indexer.getNumElements());
if (error != cudaSuccess)
return error;
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::compute_cell_list);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N_tot / run_block_size + 1);
mpcd::gpu::kernel::compute_cell_list<<<grid, run_block_size>>>(d_cell_np,
d_cell_list,
d_conditions,
d_vel,
d_embed_cell_ids,
d_pos,
d_pos_embed,
d_embed_member_idx,
periodic,
origin_idx,
grid_shift,
global_lo,
n_global_cell,
cell_size,
cell_np_max,
cell_indexer,
cell_list_indexer,
N_mpcd,
N_tot);
return cudaSuccess;
}
/*!
* \param d_migrate_flag Flag signaling migration is required (output)
* \param d_pos Embedded particle positions
* \param d_group Indexes into \a d_pos for particles in embedded group
* \param box Box covered by this domain
* \param N Number of particles in group
* \param block_size Number of threads per block
*
* \sa mpcd::gpu::kernel::cell_check_migrate_embed
*/
cudaError_t mpcd::gpu::cell_check_migrate_embed(unsigned int* d_migrate_flag,
const Scalar4* d_pos,
const unsigned int* d_group,
const BoxDim& box,
const unsigned int num_dim,
const unsigned int N,
const unsigned int block_size)
{
// ensure that the flag is always zeroed even if the caller forgets
cudaMemset(d_migrate_flag, 0, sizeof(unsigned int));
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::cell_check_migrate_embed);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N / run_block_size + 1);
mpcd::gpu::kernel::cell_check_migrate_embed<<<grid, run_block_size>>>(d_migrate_flag,
d_pos,
d_group,
box,
num_dim,
N);
return cudaSuccess;
}
cudaError_t mpcd::gpu::cell_apply_sort(unsigned int* d_cell_list,
const unsigned int* d_rorder,
const unsigned int* d_cell_np,
const Index2D& cli,
const unsigned int N_mpcd,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::cell_apply_sort);
max_block_size = attr.maxThreadsPerBlock;
}
const unsigned int N_cli = cli.getNumElements();
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N_cli / run_block_size + 1);
mpcd::gpu::kernel::cell_apply_sort<<<grid, run_block_size>>>(d_cell_list,
d_rorder,
d_cell_np,
cli,
N_mpcd,
N_cli);
return cudaSuccess;
}
|
8fc1f9f7c52eee9f7b8d495def19fc34781f3057.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@author Stan Tomov
@generated from zgemv_conjv.cu normal z -> d, Fri Jan 30 19:00:08 2015
*/
#include "common_magma.h"
#include "commonblas_d.h"
#define PRECISION_d
#define num_threads 256
__global__ void
dgemv_conjv_kernel(
int m, int n, double alpha,
const double * __restrict__ A, int lda,
const double * __restrict__ x, int incx, double beta,
double * __restrict__ y, int incy)
{
int ind = blockIdx.x*num_threads + threadIdx.x;
A += ind;
if ( ind < m ) {
double res = MAGMA_D_ZERO;
#pragma unroll
for( int i=0; i < n; i ++ ) {
res += A[0] * MAGMA_D_CNJG(x[0]);
A += lda;
x += incx;
}
y[ind*incy] = alpha * res + beta * y[ind*incy];
}
}
/**
Purpose
-------
DGEMV_CONJV performs the matrix-vector operation
y := alpha*A*conj(x) + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha DOUBLE_PRECISION
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA DOUBLE_PRECISION array of dimension ( LDA, n ) on the GPU.
@param[in]
lda INTEGER
LDA specifies the leading dimension of A.
@param[in]
dx DOUBLE_PRECISION array of dimension n
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta DOUBLE REAL
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
dy DOUBLE PRECISION array of dimension m
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@ingroup magma_dblas2
********************************************************************/
extern "C" void
magmablas_dgemv_conjv(
magma_int_t m, magma_int_t n, double alpha,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_const_ptr dx, magma_int_t incx,
double beta,
magmaDouble_ptr dy, magma_int_t incy)
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < m )
info = -5;
else if ( incx == 0 )
info = -7;
else if ( incy == 0 )
info = -10;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t blocks = (m - 1)/num_threads + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(num_threads, 1, 1);
hipLaunchKernelGGL(( dgemv_conjv_kernel), dim3(grid), dim3(threads), 0, magma_stream ,
m, n, alpha, dA, ldda, dx, incx, beta, dy, incy);
}
#undef num_threads
| 8fc1f9f7c52eee9f7b8d495def19fc34781f3057.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@author Stan Tomov
@generated from zgemv_conjv.cu normal z -> d, Fri Jan 30 19:00:08 2015
*/
#include "common_magma.h"
#include "commonblas_d.h"
#define PRECISION_d
#define num_threads 256
__global__ void
dgemv_conjv_kernel(
int m, int n, double alpha,
const double * __restrict__ A, int lda,
const double * __restrict__ x, int incx, double beta,
double * __restrict__ y, int incy)
{
int ind = blockIdx.x*num_threads + threadIdx.x;
A += ind;
if ( ind < m ) {
double res = MAGMA_D_ZERO;
#pragma unroll
for( int i=0; i < n; i ++ ) {
res += A[0] * MAGMA_D_CNJG(x[0]);
A += lda;
x += incx;
}
y[ind*incy] = alpha * res + beta * y[ind*incy];
}
}
/**
Purpose
-------
DGEMV_CONJV performs the matrix-vector operation
y := alpha*A*conj(x) + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha DOUBLE_PRECISION
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA DOUBLE_PRECISION array of dimension ( LDA, n ) on the GPU.
@param[in]
lda INTEGER
LDA specifies the leading dimension of A.
@param[in]
dx DOUBLE_PRECISION array of dimension n
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta DOUBLE PRECISION
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
dy DOUBLE PRECISION array of dimension m
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@ingroup magma_dblas2
********************************************************************/
extern "C" void
magmablas_dgemv_conjv(
magma_int_t m, magma_int_t n, double alpha,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_const_ptr dx, magma_int_t incx,
double beta,
magmaDouble_ptr dy, magma_int_t incy)
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < m )
info = -5;
else if ( incx == 0 )
info = -7;
else if ( incy == 0 )
info = -10;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t blocks = (m - 1)/num_threads + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(num_threads, 1, 1);
dgemv_conjv_kernel<<< grid, threads, 0, magma_stream >>>
(m, n, alpha, dA, ldda, dx, incx, beta, dy, incy);
}
#undef num_threads
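/*
    Worked example of the launch configuration above (editorial note): each
    thread handles one row of A, so for m = 1000 rows and num_threads = 256
    threads per block,

        blocks = (m - 1)/num_threads + 1 = 999/256 + 1 = 3 + 1 = 4,

    i.e. 4*256 = 1024 threads are launched and the bounds check if ( ind < m )
    inside dgemv_conjv_kernel discards the 24 surplus threads.
*/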
|
48e89ddf2f0f40f93b09c1933ba4e66a94509437.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <chrono>
#include <limits>
#include <spconv/mp_helper.h>
#include <spconv/indice.h>
#include <spconv/indice.cu.h>
#include <tensorview/helper_launch.h>
#include <tensorview/tensorview.h>
#include <type_traits>
#include <utility/timer.h>
namespace spconv {
namespace functor {
template <typename Index, typename IndexGrid, unsigned NDim>
struct CreateConvIndicePairFunctorP1<tv::GPU, Index, IndexGrid, NDim> {
Index operator()(const tv::GPU &d, tv::TensorView<const Index> indicesIn,
tv::TensorView<Index> indicesOut,
tv::TensorView<IndexGrid> gridsOut,
tv::TensorView<Index> indicePairs,
tv::TensorView<Index> indiceNum,
tv::TensorView<Index> indicePairUnique,
const tv::SimpleVector<Index, NDim> kernelSize,
const tv::SimpleVector<Index, NDim> stride,
const tv::SimpleVector<Index, NDim> padding,
const tv::SimpleVector<Index, NDim> dilation,
const tv::SimpleVector<Index, NDim> outSpatialShape,
bool transpose) {
Index batchSize = gridsOut.dim(0);
auto numActIn = indicesIn.dim(0);
if (numActIn == 0)
return 0;
// auto timer = spconv::CudaContextTimer<>();
if (transpose)
hipLaunchKernelGGL(( prepareDeConvIndicePairsKernel<Index, IndexGrid, NDim, 256>)
, dim3(tv::launch::getBlocks(numActIn)), dim3(tv::launch::CUDA_NUM_THREADS), 0,
d.stream(), indicesIn, indicesOut, gridsOut, indicePairs,
indiceNum, indicePairUnique, kernelSize, stride,
padding, dilation, outSpatialShape);
else
hipLaunchKernelGGL(( prepareIndicePairsKernel<Index, IndexGrid, NDim, 256>)
, dim3(tv::launch::getBlocks(numActIn)), dim3(tv::launch::CUDA_NUM_THREADS), 0,
d.stream(), indicesIn, indicesOut, gridsOut, indicePairs,
indiceNum, indicePairUnique, kernelSize, stride,
padding, dilation, outSpatialShape);
TV_CHECK_CUDA_ERR();
// std::cout << "p1 gene time " << timer.report() / 1000.0 << std::endl;
return 1;
}
};
template <typename Index, typename IndexGrid, unsigned NDim>
struct CreateConvIndicePairFunctorP2<tv::GPU, Index, IndexGrid, NDim> {
Index operator()(const tv::GPU &d, tv::TensorView<const Index> indicesIn,
tv::TensorView<Index> indicesOut,
tv::TensorView<IndexGrid> gridsOut,
tv::TensorView<Index> indicePairs,
tv::TensorView<Index> indiceNum,
tv::TensorView<Index> indicePairUnique,
const tv::SimpleVector<Index, NDim> outSpatialShape,
bool transpose, bool resetGrid) {
Index batchSize = gridsOut.dim(0);
auto kernelVolume = indicePairs.dim(0);
auto numActIn = indicesIn.dim(0);
if (numActIn == 0)
return 0;
Index numAct = indicePairUnique.dim(0) - 1;
hipLaunchKernelGGL(( assignGridAndIndiceOutKernel<Index, IndexGrid, NDim>)
, dim3(tv::launch::getBlocks(numAct)), dim3(tv::launch::CUDA_NUM_THREADS), 0,
d.stream(), indicesOut, gridsOut, numAct, indicePairs,
indicePairUnique, outSpatialShape, batchSize);
TV_CHECK_CUDA_ERR();
hipLaunchKernelGGL(( assignIndicePairsKernel<Index, IndexGrid, NDim>)
, dim3(tv::launch::getBlocks(numActIn)), dim3(tv::launch::CUDA_NUM_THREADS), 0,
d.stream(), indicesOut, gridsOut, numActIn, indicePairs,
indicePairUnique, outSpatialShape);
TV_CHECK_CUDA_ERR();
if (resetGrid) {
hipLaunchKernelGGL(( resetGridKernel<Index, IndexGrid, NDim>)
, dim3(tv::launch::getBlocks(numAct)), dim3(tv::launch::CUDA_NUM_THREADS), 0,
d.stream(), indicePairUnique.data(), gridsOut, numAct);
TV_CHECK_CUDA_ERR();
}
return numAct;
}
};
template <typename Index, typename IndexGrid, unsigned NDim>
struct CreateSubMIndicePairFunctor<tv::GPU, Index, IndexGrid, NDim> {
Index operator()(const tv::GPU &d, tv::TensorView<const Index> indicesIn,
tv::TensorView<IndexGrid> gridsOut,
tv::TensorView<Index> indicePairs,
tv::TensorView<Index> indiceNum,
const tv::SimpleVector<Index, NDim> kernelSize,
const tv::SimpleVector<Index, NDim> stride,
const tv::SimpleVector<Index, NDim> padding,
const tv::SimpleVector<Index, NDim> dilation,
const tv::SimpleVector<Index, NDim> outSpatialShape,
bool transpose, bool resetGrid) {
auto numActIn = indicesIn.dim(0);
if (numActIn == 0)
return 0;
// auto timer = spconv::CudaContextTimer<>();
hipLaunchKernelGGL(( prepareSubMGridKernel<Index, IndexGrid, NDim>)
, dim3(tv::launch::getBlocks(numActIn)), dim3(tv::launch::CUDA_NUM_THREADS), 0,
d.stream(), indicesIn, gridsOut, outSpatialShape);
TV_CHECK_CUDA_ERR();
hipLaunchKernelGGL(( getSubMIndicePairsKernel<Index, IndexGrid, NDim>)
, dim3(tv::launch::getBlocks(numActIn)), dim3(tv::launch::CUDA_NUM_THREADS), 0,
d.stream(), indicesIn, gridsOut, indicePairs, indiceNum,
kernelSize, stride, padding, dilation, outSpatialShape);
TV_CHECK_CUDA_ERR();
// std::cout << "subm gene time " << timer.report() / 1000.0 << std::endl;
if (resetGrid) {
hipLaunchKernelGGL(( resetGridSubMKernel<Index, IndexGrid, NDim>)
, dim3(tv::launch::getBlocks(numActIn)), dim3(tv::launch::CUDA_NUM_THREADS), 0,
d.stream(), indicesIn.data(), gridsOut, outSpatialShape, numActIn);
TV_CHECK_CUDA_ERR();
}
return numActIn;
}
};
} // namespace functor
#define DECLARE_GPU_SPECS_INDEX_NDIM(Index, NDIM) \
template struct functor::CreateConvIndicePairFunctor<tv::GPU, Index, int, \
NDIM>; \
template struct functor::CreateConvIndicePairFunctorP1<tv::GPU, Index, int, \
NDIM>; \
template struct functor::CreateConvIndicePairFunctorP2<tv::GPU, Index, int, \
NDIM>; \
template struct functor::CreateSubMIndicePairFunctor<tv::GPU, Index, int, \
NDIM>;
#define DECLARE_GPU_INDEX(Index) \
DECLARE_GPU_SPECS_INDEX_NDIM(Index, 1); \
DECLARE_GPU_SPECS_INDEX_NDIM(Index, 2); \
DECLARE_GPU_SPECS_INDEX_NDIM(Index, 3); \
DECLARE_GPU_SPECS_INDEX_NDIM(Index, 4);
DECLARE_GPU_INDEX(int);
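// Editorial note: DECLARE_GPU_INDEX(int) above is purely mechanical macro expansion;
// for example, the NDIM = 3 case instantiates
//
//     template struct functor::CreateConvIndicePairFunctor<tv::GPU, int, int, 3>;
//     template struct functor::CreateConvIndicePairFunctorP1<tv::GPU, int, int, 3>;
//     template struct functor::CreateConvIndicePairFunctorP2<tv::GPU, int, int, 3>;
//     template struct functor::CreateSubMIndicePairFunctor<tv::GPU, int, int, 3>;
//
// so 1D through 4D sparse convolutions get explicit instantiations with int indices.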
#undef DECLARE_GPU_INDEX
#undef DECLARE_GPU_SPECS_INDEX_NDIM
} // namespace spconv | 48e89ddf2f0f40f93b09c1933ba4e66a94509437.cu | // Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <chrono>
#include <limits>
#include <spconv/mp_helper.h>
#include <spconv/indice.h>
#include <spconv/indice.cu.h>
#include <tensorview/helper_launch.h>
#include <tensorview/tensorview.h>
#include <type_traits>
#include <utility/timer.h>
namespace spconv {
namespace functor {
template <typename Index, typename IndexGrid, unsigned NDim>
struct CreateConvIndicePairFunctorP1<tv::GPU, Index, IndexGrid, NDim> {
Index operator()(const tv::GPU &d, tv::TensorView<const Index> indicesIn,
tv::TensorView<Index> indicesOut,
tv::TensorView<IndexGrid> gridsOut,
tv::TensorView<Index> indicePairs,
tv::TensorView<Index> indiceNum,
tv::TensorView<Index> indicePairUnique,
const tv::SimpleVector<Index, NDim> kernelSize,
const tv::SimpleVector<Index, NDim> stride,
const tv::SimpleVector<Index, NDim> padding,
const tv::SimpleVector<Index, NDim> dilation,
const tv::SimpleVector<Index, NDim> outSpatialShape,
bool transpose) {
Index batchSize = gridsOut.dim(0);
auto numActIn = indicesIn.dim(0);
if (numActIn == 0)
return 0;
// auto timer = spconv::CudaContextTimer<>();
if (transpose)
prepareDeConvIndicePairsKernel<Index, IndexGrid, NDim, 256>
<<<tv::launch::getBlocks(numActIn), tv::launch::CUDA_NUM_THREADS, 0,
d.stream()>>>(indicesIn, indicesOut, gridsOut, indicePairs,
indiceNum, indicePairUnique, kernelSize, stride,
padding, dilation, outSpatialShape);
else
prepareIndicePairsKernel<Index, IndexGrid, NDim, 256>
<<<tv::launch::getBlocks(numActIn), tv::launch::CUDA_NUM_THREADS, 0,
d.stream()>>>(indicesIn, indicesOut, gridsOut, indicePairs,
indiceNum, indicePairUnique, kernelSize, stride,
padding, dilation, outSpatialShape);
TV_CHECK_CUDA_ERR();
// std::cout << "p1 gene time " << timer.report() / 1000.0 << std::endl;
return 1;
}
};
template <typename Index, typename IndexGrid, unsigned NDim>
struct CreateConvIndicePairFunctorP2<tv::GPU, Index, IndexGrid, NDim> {
Index operator()(const tv::GPU &d, tv::TensorView<const Index> indicesIn,
tv::TensorView<Index> indicesOut,
tv::TensorView<IndexGrid> gridsOut,
tv::TensorView<Index> indicePairs,
tv::TensorView<Index> indiceNum,
tv::TensorView<Index> indicePairUnique,
const tv::SimpleVector<Index, NDim> outSpatialShape,
bool transpose, bool resetGrid) {
Index batchSize = gridsOut.dim(0);
auto kernelVolume = indicePairs.dim(0);
auto numActIn = indicesIn.dim(0);
if (numActIn == 0)
return 0;
Index numAct = indicePairUnique.dim(0) - 1;
assignGridAndIndiceOutKernel<Index, IndexGrid, NDim>
<<<tv::launch::getBlocks(numAct), tv::launch::CUDA_NUM_THREADS, 0,
d.stream()>>>(indicesOut, gridsOut, numAct, indicePairs,
indicePairUnique, outSpatialShape, batchSize);
TV_CHECK_CUDA_ERR();
assignIndicePairsKernel<Index, IndexGrid, NDim>
<<<tv::launch::getBlocks(numActIn), tv::launch::CUDA_NUM_THREADS, 0,
d.stream()>>>(indicesOut, gridsOut, numActIn, indicePairs,
indicePairUnique, outSpatialShape);
TV_CHECK_CUDA_ERR();
if (resetGrid) {
resetGridKernel<Index, IndexGrid, NDim>
<<<tv::launch::getBlocks(numAct), tv::launch::CUDA_NUM_THREADS, 0,
d.stream()>>>(indicePairUnique.data(), gridsOut, numAct);
TV_CHECK_CUDA_ERR();
}
return numAct;
}
};
template <typename Index, typename IndexGrid, unsigned NDim>
struct CreateSubMIndicePairFunctor<tv::GPU, Index, IndexGrid, NDim> {
Index operator()(const tv::GPU &d, tv::TensorView<const Index> indicesIn,
tv::TensorView<IndexGrid> gridsOut,
tv::TensorView<Index> indicePairs,
tv::TensorView<Index> indiceNum,
const tv::SimpleVector<Index, NDim> kernelSize,
const tv::SimpleVector<Index, NDim> stride,
const tv::SimpleVector<Index, NDim> padding,
const tv::SimpleVector<Index, NDim> dilation,
const tv::SimpleVector<Index, NDim> outSpatialShape,
bool transpose, bool resetGrid) {
auto numActIn = indicesIn.dim(0);
if (numActIn == 0)
return 0;
// auto timer = spconv::CudaContextTimer<>();
prepareSubMGridKernel<Index, IndexGrid, NDim>
<<<tv::launch::getBlocks(numActIn), tv::launch::CUDA_NUM_THREADS, 0,
d.stream()>>>(indicesIn, gridsOut, outSpatialShape);
TV_CHECK_CUDA_ERR();
getSubMIndicePairsKernel<Index, IndexGrid, NDim>
<<<tv::launch::getBlocks(numActIn), tv::launch::CUDA_NUM_THREADS, 0,
d.stream()>>>(indicesIn, gridsOut, indicePairs, indiceNum,
kernelSize, stride, padding, dilation, outSpatialShape);
TV_CHECK_CUDA_ERR();
// std::cout << "subm gene time " << timer.report() / 1000.0 << std::endl;
if (resetGrid) {
resetGridSubMKernel<Index, IndexGrid, NDim>
<<<tv::launch::getBlocks(numActIn), tv::launch::CUDA_NUM_THREADS, 0,
d.stream()>>>(indicesIn.data(), gridsOut, outSpatialShape, numActIn);
TV_CHECK_CUDA_ERR();
}
return numActIn;
}
};
} // namespace functor
#define DECLARE_GPU_SPECS_INDEX_NDIM(Index, NDIM) \
template struct functor::CreateConvIndicePairFunctor<tv::GPU, Index, int, \
NDIM>; \
template struct functor::CreateConvIndicePairFunctorP1<tv::GPU, Index, int, \
NDIM>; \
template struct functor::CreateConvIndicePairFunctorP2<tv::GPU, Index, int, \
NDIM>; \
template struct functor::CreateSubMIndicePairFunctor<tv::GPU, Index, int, \
NDIM>;
#define DECLARE_GPU_INDEX(Index) \
DECLARE_GPU_SPECS_INDEX_NDIM(Index, 1); \
DECLARE_GPU_SPECS_INDEX_NDIM(Index, 2); \
DECLARE_GPU_SPECS_INDEX_NDIM(Index, 3); \
DECLARE_GPU_SPECS_INDEX_NDIM(Index, 4);
DECLARE_GPU_INDEX(int);
#undef DECLARE_GPU_INDEX
#undef DECLARE_GPU_SPECS_INDEX_NDIM
} // namespace spconv |
8d85ce752c90eeb0f3c0afb1cb5ecb2d8fdc08df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
namespace cv { namespace gpu { namespace device
{
namespace imgproc
{
template <typename Ptr2D, typename T> __global__ void copyMakeBorder(const Ptr2D src, PtrStepSz<T> dst, int top, int left)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
dst.ptr(y)[x] = src(y - top, x - left);
}
template <template <typename> class B, typename T> struct CopyMakeBorderDispatcher
{
static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& dst, int top, int left,
const typename VecTraits<T>::elem_type* borderValue, hipStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
B<T> brd(src.rows, src.cols, VecTraits<T>::make(borderValue));
BorderReader< PtrStep<T>, B<T> > brdSrc(src, brd);
hipLaunchKernelGGL(( copyMakeBorder), dim3(grid), dim3(block), 0, stream, brdSrc, dst, top, left);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
};
template <typename T, int cn> void copyMakeBorder_gpu(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode,
const T* borderValue, hipStream_t stream)
{
typedef typename TypeVec<T, cn>::vec_type vec_type;
typedef void (*caller_t)(const PtrStepSz<vec_type>& src, const PtrStepSz<vec_type>& dst, int top, int left, const T* borderValue, hipStream_t stream);
static const caller_t callers[5] =
{
CopyMakeBorderDispatcher<BrdReflect101, vec_type>::call,
CopyMakeBorderDispatcher<BrdReplicate, vec_type>::call,
CopyMakeBorderDispatcher<BrdConstant, vec_type>::call,
CopyMakeBorderDispatcher<BrdReflect, vec_type>::call,
CopyMakeBorderDispatcher<BrdWrap, vec_type>::call
};
callers[borderMode](PtrStepSz<vec_type>(src), PtrStepSz<vec_type>(dst), top, left, borderValue, stream);
}
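/*
    Illustrative dispatch (editorial sketch; the image buffers are hypothetical).
    The borderMode argument indexes the callers[] table above, so borderMode == 2
    selects the BrdConstant path:

        // pad a 3-channel 8-bit image with a constant border value
        copyMakeBorder_gpu<uchar, 3>(src, dst, top, left, 2, borderValue, stream);

    Indices 0..4 map to BrdReflect101, BrdReplicate, BrdConstant, BrdReflect and
    BrdWrap respectively, in the order they appear in the table.
*/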
template void copyMakeBorder_gpu<uchar, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<uchar, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<uchar, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<uchar, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<schar, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<schar, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<schar, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<schar, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<ushort, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<ushort, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<ushort, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<ushort, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<short, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<short, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<short, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<short, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<int, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<int, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<int, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<int, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<float, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, hipStream_t stream);
//template void copyMakeBorder_gpu<float, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<float, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, hipStream_t stream);
template void copyMakeBorder_gpu<float, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, hipStream_t stream);
} // namespace imgproc
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */
| 8d85ce752c90eeb0f3c0afb1cb5ecb2d8fdc08df.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
namespace cv { namespace gpu { namespace device
{
namespace imgproc
{
template <typename Ptr2D, typename T> __global__ void copyMakeBorder(const Ptr2D src, PtrStepSz<T> dst, int top, int left)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
dst.ptr(y)[x] = src(y - top, x - left);
}
template <template <typename> class B, typename T> struct CopyMakeBorderDispatcher
{
static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& dst, int top, int left,
const typename VecTraits<T>::elem_type* borderValue, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
B<T> brd(src.rows, src.cols, VecTraits<T>::make(borderValue));
BorderReader< PtrStep<T>, B<T> > brdSrc(src, brd);
copyMakeBorder<<<grid, block, 0, stream>>>(brdSrc, dst, top, left);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
};
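/*
    Worked example of the launch geometry above (editorial note): with the fixed
    block of 32 x 8 threads, a 1920 x 1080 destination image yields

        grid.x = divUp(1920, 32) = 60,   grid.y = divUp(1080, 8) = 135,

    so every destination pixel is covered, and the bounds check inside the
    copyMakeBorder kernel discards threads that fall outside dst.cols x dst.rows.
*/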
template <typename T, int cn> void copyMakeBorder_gpu(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode,
const T* borderValue, cudaStream_t stream)
{
typedef typename TypeVec<T, cn>::vec_type vec_type;
typedef void (*caller_t)(const PtrStepSz<vec_type>& src, const PtrStepSz<vec_type>& dst, int top, int left, const T* borderValue, cudaStream_t stream);
static const caller_t callers[5] =
{
CopyMakeBorderDispatcher<BrdReflect101, vec_type>::call,
CopyMakeBorderDispatcher<BrdReplicate, vec_type>::call,
CopyMakeBorderDispatcher<BrdConstant, vec_type>::call,
CopyMakeBorderDispatcher<BrdReflect, vec_type>::call,
CopyMakeBorderDispatcher<BrdWrap, vec_type>::call
};
callers[borderMode](PtrStepSz<vec_type>(src), PtrStepSz<vec_type>(dst), top, left, borderValue, stream);
}
template void copyMakeBorder_gpu<uchar, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<uchar, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<uchar, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<uchar, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<schar, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<schar, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<schar, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<schar, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<ushort, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<ushort, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<ushort, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<ushort, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<short, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<short, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<short, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<short, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<int, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<int, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<int, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<int, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<float, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<float, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<float, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<float, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream);
} // namespace imgproc
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */
|
d80c67baaec916f42bb689ed58d8f67a2805130d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (C) 2015 by Fernando Amat
* See license.txt for full license and copyright notice.
*
* Authors: Fernando Amat
* commonCUDA.cu
*
* Created on: June 5th, 2015
* Author: Fernando Amat
*
* \brief common functions and constants for CUDA
*/
#include <cstdint>
#include <algorithm>
#include <iostream>
#include "commonCUDA.h"
#include "hip/hip_runtime.h"
#include "book.h"
using namespace std;
//profiling it is better to not use all the threads
#define THREADS_CUDA_FOR_REDUCTION (MAX_THREADS_CUDA / 4)
/**
* element-wise binary functors (max, min, add, subtract, multiply, divide, power, ...) used by the CUDA kernels below
*/
template <class T>
struct max_func
{
max_func(){};
__device__ T operator () (const T& a, const T& b) { return (a > b ? a : b); }
};
template <class T>
struct min_func
{
min_func(){};
__device__ T operator () (const T& a, const T& b) { return (a > b ? b : a); }
};
template <class T>
struct add_func
{
add_func(){};
__device__ T operator () (const T& a, const T& b) { return a + b; }
};
template <class T>
struct sub_func
{
sub_func(){};
__device__ T operator () (const T& a, const T& b) { return a - b; }
};
template <class T>
struct sub_pos_func
{
sub_pos_func(){};
__device__ T operator () (const T& a, const T& b) { return (a > b ? a-b : 0); }//lower bounded by zero
};
template <class T>
struct div_func
{
div_func(){};
__device__ T operator () (const T& a, const T& b) { return a / b; }
};
template <class T>
struct div_inv_func
{
div_inv_func(){};
__device__ T operator () (const T& a, const T& b) { return b / a; }
};
template <class T>
struct mul_func
{
mul_func(){};
__device__ T operator () (const T& a, const T& b) { return a * b; }
};
template <class T>
struct power_func
{
power_func(){};
__device__ T operator () (const T& a, const T& b) { return pow(a,b); }
};
template <>
struct power_func<unsigned char>
{
power_func(){};
__device__ unsigned char operator () (const unsigned char& a, const unsigned char& b) { return (unsigned char)pow((float)a, (float)b); }
};
template <>
struct power_func<unsigned short>
{
power_func(){};
__device__ unsigned short operator () (const unsigned short& a, const unsigned short& b) { return (unsigned short)pow((float)a, (float)b); }
};
template <class T>
struct isnan_func
{
isnan_func(){};
__device__ T operator () (const T& a, const T& b) { return a; }
};
template <>
struct isnan_func <float>
{
isnan_func(){};
__device__ float operator () (const float& a, const float& b) { return(::isnan(a) ? b : a); }
};
template <>
struct isnan_func <double>
{
isnan_func(){};
__device__ double operator () (const double& a, const double& b) { return(::isnan(a) ? b : a); }
};
template <class T>
struct isinf_func
{
isinf_func(){};
__device__ T operator () (const T& a, const T& b) { return a; }
};
template <>
struct isinf_func <float>
{
isinf_func(){};
__device__ float operator () (const float& a, const float& b) { return(::isinf(a) ? b : a); }
};
template <>
struct isinf_func <double>
{
isinf_func(){};
__device__ double operator () (const double& a, const double& b) { return(::isinf(a) ? b : a); }
};
template <class T>
struct equal_func
{
equal_func(){};
__device__ T operator () (const T& a, const T& b) { return b; }
};
template <class T>
struct threshold_func
{
threshold_func(){};
__device__ T operator () (const T& a, const T& b) { return (a > b ? a : b); }
};
//inspired by https://github.com/DrMikeMorgan/Cuda/blob/master/functors.cu.h on how to use functors for CUDA
//starting with CUDA 7.0 we can probably use lambda functions instead of structs (CUDA 7.0 incorporates the C++11 standard)
template<class T, class operation>
__global__ void elementwiseOperationInPlace_kernel(T *A, const T *B, std::uint64_t arrayLength, operation op)
{
std::uint64_t tid = blockDim.x * blockIdx.x + threadIdx.x;
while (tid < arrayLength)
{
A[tid] = op(A[tid], B[tid]);
tid += blockDim.x * gridDim.x;
}
}
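/*
    Editorial note on the grid-stride loop above: the launch may use fewer threads
    than there are array elements, so each thread walks the array with a stride of
    blockDim.x * gridDim.x. For instance, with 2 blocks of 2 threads (stride 4)
    and arrayLength = 10, the thread with tid = 1 processes indices 1, 5 and 9.
*/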
//==============================================================================================
template<class T>
__global__ void elementwiseOperationInPlace__TVreg_kernel(T *A, const T *B, std::uint64_t arrayLength, T lambda)
{
std::uint64_t tid = blockDim.x * blockIdx.x + threadIdx.x;
while (tid < arrayLength)
{
A[tid] /= max(1.0 - lambda * B[tid], 1e-3);//we avoid "crazy" updates by setting a maximum
tid += blockDim.x * gridDim.x;
}
}
//==============================================================================================
template<class T, class operation>
__global__ void elementwiseOperationInPlace_kernel(T *A, const T B, std::uint64_t arrayLength, operation op)
{
std::uint64_t tid = blockDim.x * blockIdx.x + threadIdx.x;
while (tid < arrayLength)
{
A[tid] = op(A[tid], B);
tid += blockDim.x * gridDim.x;
}
}
//==============================================================================================
template<class T, class operation>
__global__ void elementwiseOperationOutOfPlace_kernel(T* C, const T *A, const T *B, std::uint64_t arrayLength, operation op)
{
std::uint64_t tid = blockDim.x * blockIdx.x + threadIdx.x;
while (tid < arrayLength)
{
C[tid] = op(A[tid], B[tid]);
tid += blockDim.x * gridDim.x;
}
}
//==============================================================================================
template<class T, class operation>
__global__ void elementwiseOperationOutOfPlace_kernel(T* C, const T A, const T *B, std::uint64_t arrayLength, operation op)
{
std::uint64_t tid = blockDim.x * blockIdx.x + threadIdx.x;
while (tid < arrayLength)
{
C[tid] = op(A, B[tid]);
tid += blockDim.x * gridDim.x;
}
}
//==============================================================================================
template<class T, class operation>
__global__ void elementwiseOperationOutOfPlace_compund_kernel(T* C, const T *A, const T *B, std::uint64_t arrayLength, operation op)
{
std::uint64_t tid = blockDim.x * blockIdx.x + threadIdx.x;
while (tid < arrayLength)
{
C[tid] += op(A[tid], B[tid]);
tid += blockDim.x * gridDim.x;
}
}
//==============================================================================================
template<class T, class operation>
__global__ void elementwiseOperationOutOfPlace_compund_kernel(T* C, const T A, const T *B, std::uint64_t arrayLength, operation op)
{
std::uint64_t tid = blockDim.x * blockIdx.x + threadIdx.x;
while (tid < arrayLength)
{
C[tid] += op(A, B[tid]);
tid += blockDim.x * gridDim.x;
}
}
//==============================================================================================
template<class T, class operation>
__global__ void reductionOperation_kernel(const T *A, T* temp_accumulator_CUDA, std::uint64_t arrayLength, operation op, T defaultVal)
{
__shared__ T copyShared[THREADS_CUDA_FOR_REDUCTION];//blockDim.x = THREADS_CUDA_FOR_REDUCTION
//copy to shared memory
if (blockDim.x * blockIdx.x + threadIdx.x < arrayLength)
copyShared[threadIdx.x] = A[blockDim.x * blockIdx.x + threadIdx.x];
else
copyShared[threadIdx.x] = defaultVal;//depending on the reduction operation we want different default values here
__syncthreads();
//perform reduction
int i = blockDim.x / 2;
while (i != 0)
{
if (threadIdx.x < i)
copyShared[threadIdx.x] = op(copyShared[threadIdx.x], copyShared[threadIdx.x + i]);
__syncthreads();
i /= 2;
}
//store reduction value for this block
if ( threadIdx.x == 0)
temp_accumulator_CUDA[blockIdx.x] = copyShared[0];
}
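/*
    Editorial walk-through of the shared-memory reduction above for a toy block of
    blockDim.x = 8 threads: the stride i takes the values 4, 2, 1, so
      step i=4: copyShared[0..3] = op(copyShared[0..3], copyShared[4..7])
      step i=2: copyShared[0..1] = op(copyShared[0..1], copyShared[2..3])
      step i=1: copyShared[0]    = op(copyShared[0],    copyShared[1])
    leaving the block's partial result in copyShared[0], which thread 0 writes to
    temp_accumulator_CUDA[blockIdx.x]; the host later combines the per-block values.
*/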
//==============================================================================================
void elementwiseOperationInPlace_TVreg(float* A, const float* B, std::uint64_t arrayLength, float lambdaTV)
{
int numThreads = ::min((uint64_t)MAX_THREADS_CUDA / 4, arrayLength);//profiling it is better to not use all threads for better occupancy
int numBlocks = ::min((uint64_t)MAX_BLOCKS_CUDA, (uint64_t)(arrayLength + (uint64_t)(numThreads - 1)) / ((uint64_t)numThreads));
elementwiseOperationInPlace__TVreg_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, lambdaTV); HANDLE_ERROR_KERNEL;
}
//==============================================================================================
template<class T>
void elementwiseOperationInPlace(T* A, const T* B, std::uint64_t arrayLength, op_elementwise_type op)
{
int numThreads = ::min((uint64_t)MAX_THREADS_CUDA / 4, arrayLength);//profiling it is better to not use all threads for better occupancy
int numBlocks = ::min((uint64_t)MAX_BLOCKS_CUDA, (uint64_t)(arrayLength + (uint64_t)(numThreads - 1)) / ((uint64_t)numThreads));
switch (op)
{
case op_elementwise_type::plus:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, add_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::minus:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, sub_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::multiply:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, mul_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::divide:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, div_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::divide_inv:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, div_inv_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::copy:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, equal_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::minus_positive:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, sub_pos_func<T>()); HANDLE_ERROR_KERNEL;
break;
default:
cout << "ERROR: elementwiseOperationInPlace: operation not supported" << endl;
}
}
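/*
    Usage sketch (editorial addition; A_GPU and B_GPU are hypothetical device
    pointers of equal length n that were allocated elsewhere):

        // A_GPU[i] *= B_GPU[i] for every element
        elementwiseOperationInPlace<float>(A_GPU, B_GPU, n, op_elementwise_type::multiply);

        // A_GPU[i] = (A_GPU[i] > B_GPU[i] ? A_GPU[i] - B_GPU[i] : 0), the lower-bounded subtraction above
        elementwiseOperationInPlace<float>(A_GPU, B_GPU, n, op_elementwise_type::minus_positive);
*/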
//==============================================================================================
template<class T>
void elementwiseOperationInPlace(T* A, const T B, std::uint64_t arrayLength, op_elementwise_type op)
{
int numThreads = ::min((uint64_t)MAX_THREADS_CUDA / 4, arrayLength);//profiling it is better to not use all threads for better occupancy
int numBlocks = ::min((uint64_t)MAX_BLOCKS_CUDA, (uint64_t)(arrayLength + (uint64_t)(numThreads - 1)) / ((uint64_t)numThreads));
switch (op)
{
case op_elementwise_type::plus:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, add_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::minus:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, sub_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::multiply:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, mul_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::divide:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, div_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::copy:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, equal_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::minus_positive:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, sub_pos_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::isnanop:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, isnan_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::isinfop:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, isinf_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::power:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, power_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::threshold:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, threshold_func<T>()); HANDLE_ERROR_KERNEL;
break;
default:
cout << "ERROR: elementwiseOperationInPlace: operation not supported" << endl;
}
}
//==========================================================================================================
template<class T>
void elementwiseOperationOutOfPlace(T* C, const T* A, const T* B, std::uint64_t arrayLength, op_elementwise_type op)
{
int numThreads = ::min((uint64_t)MAX_THREADS_CUDA / 4, arrayLength);//profiling it is better to not use all threads for better occupancy
int numBlocks = ::min((uint64_t)MAX_BLOCKS_CUDA, (uint64_t)(arrayLength + (uint64_t)(numThreads - 1)) / ((uint64_t)numThreads));
switch (op)
{
case op_elementwise_type::plus:
elementwiseOperationOutOfPlace_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, add_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::minus:
elementwiseOperationOutOfPlace_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, sub_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::multiply:
elementwiseOperationOutOfPlace_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, mul_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::divide:
elementwiseOperationOutOfPlace_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, div_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::compound_plus:
elementwiseOperationOutOfPlace_compund_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, add_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::compound_multiply:
elementwiseOperationOutOfPlace_compund_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, mul_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::minus_positive:
elementwiseOperationOutOfPlace_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, sub_pos_func<T>()); HANDLE_ERROR_KERNEL;
break;
default:
cout << "ERROR: elementwiseOperationOutOfPlace: operation not supported" << endl;
}
}
//==========================================================================================================
template<class T>
void elementwiseOperationOutOfPlace(T* C, const T A, const T* B, std::uint64_t arrayLength, op_elementwise_type op)
{
int numThreads = ::min((uint64_t)MAX_THREADS_CUDA / 4, arrayLength);//profiling it is better to not use all threads for better occupancy
int numBlocks = ::min((uint64_t)MAX_BLOCKS_CUDA, (uint64_t)(arrayLength + (uint64_t)(numThreads - 1)) / ((uint64_t)numThreads));
switch (op)
{
case op_elementwise_type::plus:
elementwiseOperationOutOfPlace_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, add_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::minus:
elementwiseOperationOutOfPlace_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, sub_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::multiply:
elementwiseOperationOutOfPlace_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, mul_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::divide:
elementwiseOperationOutOfPlace_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, div_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::compound_plus:
elementwiseOperationOutOfPlace_compund_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, add_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::compound_multiply:
elementwiseOperationOutOfPlace_compund_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, mul_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::minus_positive:
elementwiseOperationOutOfPlace_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, sub_pos_func<T>()); HANDLE_ERROR_KERNEL;
break;
default:
cout << "ERROR: elementwiseOperationOutOfPlace: operation not supported" << endl;
}
}
//======================================================================
template<class T>
T reductionOperation(const T* A, std::uint64_t arrayLength, op_reduction_type op)
{
const int numThreads = THREADS_CUDA_FOR_REDUCTION;//profiling it is better to not use all threads for better occupancy
const int numBlocks = ::min((uint64_t)MAX_BLOCKS_CUDA, (uint64_t)(arrayLength + (uint64_t)(numThreads - 1)) / ((uint64_t)numThreads));
const uint64_t chunkSize = ((uint64_t)(numThreads)) * ((uint64_t)(numBlocks));
//allocate temporary memory to finish the reduction on the CPU
T* reduction_CPU;
T* reduction_GPU;
HANDLE_ERROR(hipMalloc((void**)&(reduction_GPU), numBlocks * sizeof(T)));
HANDLE_ERROR(hipHostMalloc((void**)&reduction_CPU, numBlocks * sizeof(T))); // host pinned for faster transfers
//initialize result
T finalVal;
switch (op)
{
case op_reduction_type::add:
finalVal = 0;
break;
case op_reduction_type::max_elem:
finalVal = numeric_limits<T>::min();
break;
case op_reduction_type::min_elem:
finalVal = numeric_limits<T>::max();
break;
default:
cout << "ERROR: reductionOperation: operation not supported" << endl;
}
//main loop
std::uint64_t offset = 0;
std::uint64_t length, arrayLengthOrig = arrayLength;
while (offset < arrayLengthOrig)
{
const T* ptr = &(A[offset]);
length = min(arrayLength, chunkSize);
switch (op)
{
case op_reduction_type::add:
reductionOperation_kernel << <numBlocks, numThreads >> >(ptr, reduction_GPU, length, add_func<T>(), T(0)); HANDLE_ERROR_KERNEL;
HANDLE_ERROR(hipMemcpy(reduction_CPU, reduction_GPU, numBlocks * sizeof(T), hipMemcpyDeviceToHost));
for (int ii = 0; ii < numBlocks; ii++)
finalVal += reduction_CPU[ii];
break;
case op_reduction_type::max_elem:
// Should the default value here really be numeric_limits<T>::min() ?
// Why not -1.0*numeric_limits<T>::infinity() , which is the identity value under the max(x,y) operation?
// Or maybe numeric_limits<T>::lower(), which is a negative number with a large absolute value?
// (numeric_limits<T>::min() is a positive number with a small absolute value...)
// Or maybe just make the caller supply the default value explicitly?
reductionOperation_kernel << <numBlocks, numThreads >> >(ptr, reduction_GPU, length, max_func<T>(), numeric_limits<T>::min()); HANDLE_ERROR_KERNEL;
HANDLE_ERROR(hipMemcpy(reduction_CPU, reduction_GPU, numBlocks * sizeof(T), hipMemcpyDeviceToHost));
for (int ii = 0; ii < numBlocks; ii++)
finalVal = max(reduction_CPU[ii], finalVal);
break;
case op_reduction_type::min_elem:
// Should the default value here really be numeric_limits<T>::max() ? Why not numeric_limits<T>::infinity() , which is the identity value under the min(x,y) operation?
reductionOperation_kernel << <numBlocks, numThreads >> >(ptr, reduction_GPU, length, min_func<T>(), numeric_limits<T>::max()); HANDLE_ERROR_KERNEL;
HANDLE_ERROR(hipMemcpy(reduction_CPU, reduction_GPU, numBlocks * sizeof(T), hipMemcpyDeviceToHost));
for (int ii = 0; ii < numBlocks; ii++)
finalVal = min(reduction_CPU[ii], finalVal);
break;
default:
cout << "ERROR: reductionOperation: operation not supported" << endl;
}
offset += chunkSize;
arrayLength -= chunkSize;
}
//release memory
HANDLE_ERROR(hipFree(reduction_GPU));
HANDLE_ERROR(hipHostFree(reduction_CPU));
return finalVal;
}
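/*
    Usage sketch (editorial addition; A_GPU is a hypothetical device pointer with
    n valid elements):

        float sumVal = reductionOperation<float>(A_GPU, n, op_reduction_type::add);
        float maxVal = reductionOperation<float>(A_GPU, n, op_reduction_type::max_elem);

    The array is processed in chunks of numThreads * numBlocks elements; each chunk
    is reduced per block on the GPU and the per-block partials are folded on the CPU.
*/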
//==================================================================
template<class T>
T* allocateMem_GPU(size_t numElem)
{
T* ptr;
HANDLE_ERROR(hipMalloc((void**)&(ptr), numElem * sizeof(T)));
return ptr;
};
template<class T>
void deallocateMem_GPU(T* ptr)
{
if ( ptr != NULL)
HANDLE_ERROR(hipFree(ptr));//you still need to set the pointer to NULL
};
template<class T>
void copy_GPU_to_CPU(T* ptr_CPU, const T* ptr_CUDA, size_t numElem)
{
HANDLE_ERROR(hipMemcpy(ptr_CPU, ptr_CUDA, numElem * sizeof(T), hipMemcpyDeviceToHost));
}
template<class T>
void copy_CPU_to_GPU(const T* ptr_CPU, T* ptr_CUDA, size_t numElem)
{
HANDLE_ERROR(hipMemcpy(ptr_CUDA, ptr_CPU, numElem * sizeof(T), hipMemcpyHostToDevice));
}
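/*
    End-to-end sketch combining the helpers in this file (editorial addition;
    h_data is a hypothetical host buffer holding n floats):

        float* d_data = allocateMem_GPU<float>(n);
        copy_CPU_to_GPU<float>(h_data, d_data, n);
        elementwiseOperationInPlace<float>(d_data, 2.0f, n, op_elementwise_type::multiply);
        float maxVal = reductionOperation<float>(d_data, n, op_reduction_type::max_elem);
        copy_GPU_to_CPU<float>(h_data, d_data, n);
        deallocateMem_GPU<float>(d_data);
        d_data = NULL; // as noted above, the caller must reset the pointer itself

    All of these entry points are explicitly instantiated for float below.
*/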
//======================================================================
//instantiate templates
template void elementwiseOperationInPlace<std::uint8_t>(std::uint8_t* A, const std::uint8_t* B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationInPlace<std::uint16_t>(std::uint16_t* A, const std::uint16_t* B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationInPlace<float>(float* A, const float* B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationInPlace<std::uint8_t>(std::uint8_t* A, const std::uint8_t B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationInPlace<std::uint16_t>(std::uint16_t* A, const std::uint16_t B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationInPlace<float>(float* A, const float B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationOutOfPlace<float>(float* C, const float* A, const float* B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationOutOfPlace<std::uint16_t>(std::uint16_t* C, const std::uint16_t* A, const std::uint16_t* B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationOutOfPlace<std::uint8_t>(std::uint8_t* C, const std::uint8_t* A, const std::uint8_t* B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationOutOfPlace<float>(float* C, const float A, const float* B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationOutOfPlace<std::uint16_t>(std::uint16_t* C, const std::uint16_t A, const std::uint16_t* B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationOutOfPlace<std::uint8_t>(std::uint8_t* C, const std::uint8_t A, const std::uint8_t* B, std::uint64_t arrayLength, op_elementwise_type op);
template float reductionOperation<float>(const float* A, std::uint64_t arrayLength, op_reduction_type op);
template std::uint8_t reductionOperation<std::uint8_t>(const std::uint8_t* A, std::uint64_t arrayLength, op_reduction_type op);
template std::uint16_t reductionOperation<std::uint16_t>(const std::uint16_t* A, std::uint64_t arrayLength, op_reduction_type op);
template double reductionOperation<double>(const double* A, std::uint64_t arrayLength, op_reduction_type op);
template void deallocateMem_GPU<float>(float* ptr);
template void deallocateMem_GPU<std::uint8_t>(std::uint8_t* ptr);
template void deallocateMem_GPU<std::uint16_t>(std::uint16_t* ptr);
template float* allocateMem_GPU<float>(size_t numElem);
template std::uint8_t* allocateMem_GPU<std::uint8_t>(size_t numElem);
template std::uint16_t* allocateMem_GPU<std::uint16_t>(size_t numElem);
template void copy_GPU_to_CPU<float>(float* ptr_CPU, const float* ptr_CUDA, size_t numElem);
template void copy_GPU_to_CPU<std::uint8_t>(std::uint8_t* ptr_CPU,const std::uint8_t* ptr_CUDA, size_t numElem);
template void copy_GPU_to_CPU<std::uint16_t>(std::uint16_t* ptr_CPU, const std::uint16_t* ptr_CUDA, size_t numElem);
template void copy_CPU_to_GPU<float>(const float* ptr_CPU, float* ptr_CUDA, size_t numElem);
template void copy_CPU_to_GPU<std::uint8_t>(const std::uint8_t* ptr_CPU, std::uint8_t* ptr_CUDA, size_t numElem);
template void copy_CPU_to_GPU<std::uint16_t>(const std::uint16_t* ptr_CPU, std::uint16_t* ptr_CUDA, size_t numElem); | d80c67baaec916f42bb689ed58d8f67a2805130d.cu | /*
* Copyright (C) 2015 by Fernando Amat
* See license.txt for full license and copyright notice.
*
* Authors: Fernando Amat
* commonCUDA.cu
*
* Created on: June 5th, 2015
* Author: Fernando Amat
*
* \brief common functions and constants for CUDA
*/
#include <cstdint>
#include <algorithm>
#include <iostream>
#include "commonCUDA.h"
#include "cuda.h"
#include "book.h"
using namespace std;
//profiling it is better to not use all the threads
#define THREADS_CUDA_FOR_REDUCTION (MAX_THREADS_CUDA / 4)
/**
* element-wise binary functors (max, min, add, subtract, multiply, divide, power, ...) used by the CUDA kernels below
*/
template <class T>
struct max_func
{
max_func(){};
__device__ T operator () (const T& a, const T& b) { return (a > b ? a : b); }
};
template <class T>
struct min_func
{
min_func(){};
__device__ T operator () (const T& a, const T& b) { return (a > b ? b : a); }
};
template <class T>
struct add_func
{
add_func(){};
__device__ T operator () (const T& a, const T& b) { return a + b; }
};
template <class T>
struct sub_func
{
sub_func(){};
__device__ T operator () (const T& a, const T& b) { return a - b; }
};
template <class T>
struct sub_pos_func
{
sub_pos_func(){};
__device__ T operator () (const T& a, const T& b) { return (a > b ? a-b : 0); }//lower bounded by zero
};
template <class T>
struct div_func
{
div_func(){};
__device__ T operator () (const T& a, const T& b) { return a / b; }
};
template <class T>
struct div_inv_func
{
div_inv_func(){};
__device__ T operator () (const T& a, const T& b) { return b / a; }
};
template <class T>
struct mul_func
{
mul_func(){};
__device__ T operator () (const T& a, const T& b) { return a * b; }
};
template <class T>
struct power_func
{
power_func(){};
__device__ T operator () (const T& a, const T& b) { return pow(a,b); }
};
template <>
struct power_func<unsigned char>
{
power_func(){};
__device__ unsigned char operator () (const unsigned char& a, const unsigned char& b) { return (unsigned char)pow((float)a, (float)b); }
};
template <>
struct power_func<unsigned short>
{
power_func(){};
__device__ unsigned short operator () (const unsigned short& a, const unsigned short& b) { return (unsigned short)pow((float)a, (float)b); }
};
template <class T>
struct isnan_func
{
isnan_func(){};
__device__ T operator () (const T& a, const T& b) { return a; }
};
template <>
struct isnan_func <float>
{
isnan_func(){};
__device__ float operator () (const float& a, const float& b) { return(::isnan(a) ? b : a); }
};
template <>
struct isnan_func <double>
{
isnan_func(){};
__device__ double operator () (const double& a, const double& b) { return(::isnan(a) ? b : a); }
};
template <class T>
struct isinf_func
{
isinf_func(){};
__device__ T operator () (const T& a, const T& b) { return a; }
};
template <>
struct isinf_func <float>
{
isinf_func(){};
__device__ float operator () (const float& a, const float& b) { return(::isinf(a) ? b : a); }
};
template <>
struct isinf_func <double>
{
isinf_func(){};
__device__ double operator () (const double& a, const double& b) { return(::isinf(a) ? b : a); }
};
template <class T>
struct equal_func
{
equal_func(){};
__device__ T operator () (const T& a, const T& b) { return b; }
};
template <class T>
struct threshold_func
{
threshold_func(){};
__device__ T operator () (const T& a, const T& b) { return (a > b ? a : b); }
};
//inspired by https://github.com/DrMikeMorgan/Cuda/blob/master/functors.cu.h on how to use functors for CUDA
//starting with CUDA 7.0 we can probably use lambda functions instead of structs (CUDA 7.0 incorporates the C++11 standard)
template<class T, class operation>
__global__ void elementwiseOperationInPlace_kernel(T *A, const T *B, std::uint64_t arrayLength, operation op)
{
std::uint64_t tid = blockDim.x * blockIdx.x + threadIdx.x;
while (tid < arrayLength)
{
A[tid] = op(A[tid], B[tid]);
tid += blockDim.x * gridDim.x;
}
}
//==============================================================================================
template<class T>
__global__ void elementwiseOperationInPlace__TVreg_kernel(T *A, const T *B, std::uint64_t arrayLength, T lambda)
{
std::uint64_t tid = blockDim.x * blockIdx.x + threadIdx.x;
while (tid < arrayLength)
{
A[tid] /= max(1.0 - lambda * B[tid], 1e-3);//clamp the denominator to at least 1e-3 to avoid excessively large updates
tid += blockDim.x * gridDim.x;
}
}
//==============================================================================================
template<class T, class operation>
__global__ void elementwiseOperationInPlace_kernel(T *A, const T B, std::uint64_t arrayLength, operation op)
{
std::uint64_t tid = blockDim.x * blockIdx.x + threadIdx.x;
while (tid < arrayLength)
{
A[tid] = op(A[tid], B);
tid += blockDim.x * gridDim.x;
}
}
//==============================================================================================
template<class T, class operation>
__global__ void elementwiseOperationOutOfPlace_kernel(T* C, const T *A, const T *B, std::uint64_t arrayLength, operation op)
{
std::uint64_t tid = blockDim.x * blockIdx.x + threadIdx.x;
while (tid < arrayLength)
{
C[tid] = op(A[tid], B[tid]);
tid += blockDim.x * gridDim.x;
}
}
//==============================================================================================
template<class T, class operation>
__global__ void elementwiseOperationOutOfPlace_kernel(T* C, const T A, const T *B, std::uint64_t arrayLength, operation op)
{
std::uint64_t tid = blockDim.x * blockIdx.x + threadIdx.x;
while (tid < arrayLength)
{
C[tid] = op(A, B[tid]);
tid += blockDim.x * gridDim.x;
}
}
//==============================================================================================
template<class T, class operation>
__global__ void elementwiseOperationOutOfPlace_compund_kernel(T* C, const T *A, const T *B, std::uint64_t arrayLength, operation op)
{
std::uint64_t tid = blockDim.x * blockIdx.x + threadIdx.x;
while (tid < arrayLength)
{
C[tid] += op(A[tid], B[tid]);
tid += blockDim.x * gridDim.x;
}
}
//==============================================================================================
template<class T, class operation>
__global__ void elementwiseOperationOutOfPlace_compund_kernel(T* C, const T A, const T *B, std::uint64_t arrayLength, operation op)
{
std::uint64_t tid = blockDim.x * blockIdx.x + threadIdx.x;
while (tid < arrayLength)
{
C[tid] += op(A, B[tid]);
tid += blockDim.x * gridDim.x;
}
}
//==============================================================================================
template<class T, class operation>
__global__ void reductionOperation_kernel(const T *A, T* temp_accumulator_CUDA, std::uint64_t arrayLength, operation op, T defaultVal)
{
__shared__ T copyShared[THREADS_CUDA_FOR_REDUCTION];//blockDim.x = THREADS_CUDA_FOR_REDUCTION
//copy to share memory
if (blockDim.x * blockIdx.x + threadIdx.x < arrayLength)
copyShared[threadIdx.x] = A[blockDim.x * blockIdx.x + threadIdx.x];
else
copyShared[threadIdx.x] = defaultVal;//depending on the reduction operation we want different default values here
__syncthreads();
//perform reduction
int i = blockDim.x / 2;
while (i != 0)
{
if (threadIdx.x < i)
copyShared[threadIdx.x] = op(copyShared[threadIdx.x], copyShared[threadIdx.x + i]);
__syncthreads();
i /= 2;
}
//store reduction value for this block
if ( threadIdx.x == 0)
temp_accumulator_CUDA[blockIdx.x] = copyShared[0];
}
//==============================================================================================
void elementwiseOperationInPlace_TVreg(float* A, const float* B, std::uint64_t arrayLength, float lambdaTV)
{
int numThreads = std::min((uint64_t)MAX_THREADS_CUDA / 4, arrayLength);//profiling showed better occupancy when not using all the threads
int numBlocks = std::min((uint64_t)MAX_BLOCKS_CUDA, (uint64_t)(arrayLength + (uint64_t)(numThreads - 1)) / ((uint64_t)numThreads));
elementwiseOperationInPlace__TVreg_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, lambdaTV); HANDLE_ERROR_KERNEL;
}
//==============================================================================================
template<class T>
void elementwiseOperationInPlace(T* A, const T* B, std::uint64_t arrayLength, op_elementwise_type op)
{
int numThreads = std::min((uint64_t)MAX_THREADS_CUDA / 4, arrayLength);//profiling showed better occupancy when not using all the threads
int numBlocks = std::min((uint64_t)MAX_BLOCKS_CUDA, (uint64_t)(arrayLength + (uint64_t)(numThreads - 1)) / ((uint64_t)numThreads));
switch (op)
{
case op_elementwise_type::plus:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, add_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::minus:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, sub_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::multiply:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, mul_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::divide:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, div_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::divide_inv:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, div_inv_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::copy:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, equal_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::minus_positive:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, sub_pos_func<T>()); HANDLE_ERROR_KERNEL;
break;
default:
cout << "ERROR: elementwiseOperationInPlace: operation not supported" << endl;
}
}
//==============================================================================================
template<class T>
void elementwiseOperationInPlace(T* A, const T B, std::uint64_t arrayLength, op_elementwise_type op)
{
int numThreads = std::min((uint64_t)MAX_THREADS_CUDA / 4, arrayLength);//profiling showed better occupancy when not using all the threads
int numBlocks = std::min((uint64_t)MAX_BLOCKS_CUDA, (uint64_t)(arrayLength + (uint64_t)(numThreads - 1)) / ((uint64_t)numThreads));
switch (op)
{
case op_elementwise_type::plus:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, add_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::minus:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, sub_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::multiply:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, mul_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::divide:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, div_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::copy:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, equal_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::minus_positive:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, sub_pos_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::isnanop:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, isnan_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::isinfop:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, isinf_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::power:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, power_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::threshold:
elementwiseOperationInPlace_kernel << <numBlocks, numThreads >> > (A, B, arrayLength, threshold_func<T>()); HANDLE_ERROR_KERNEL;
break;
default:
cout << "ERROR: elementwiseOperationInPlace: operation not supported" << endl;
}
}
//==========================================================================================================
template<class T>
void elementwiseOperationOutOfPlace(T* C, const T* A, const T* B, std::uint64_t arrayLength, op_elementwise_type op)
{
int numThreads = std::min((uint64_t)MAX_THREADS_CUDA / 4, arrayLength);//profiling showed better occupancy when not using all the threads
int numBlocks = std::min((uint64_t)MAX_BLOCKS_CUDA, (uint64_t)(arrayLength + (uint64_t)(numThreads - 1)) / ((uint64_t)numThreads));
switch (op)
{
case op_elementwise_type::plus:
elementwiseOperationOutOfPlace_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, add_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::minus:
elementwiseOperationOutOfPlace_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, sub_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::multiply:
elementwiseOperationOutOfPlace_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, mul_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::divide:
elementwiseOperationOutOfPlace_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, div_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::compound_plus:
elementwiseOperationOutOfPlace_compund_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, add_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::compound_multiply:
elementwiseOperationOutOfPlace_compund_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, mul_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::minus_positive:
elementwiseOperationOutOfPlace_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, sub_pos_func<T>()); HANDLE_ERROR_KERNEL;
break;
default:
cout << "ERROR: elementwiseOperationInPlace: operation not supported" << endl;
}
}
//==========================================================================================================
template<class T>
void elementwiseOperationOutOfPlace(T* C, const T A, const T* B, std::uint64_t arrayLength, op_elementwise_type op)
{
int numThreads = std::min((uint64_t)MAX_THREADS_CUDA / 4, arrayLength);//profiling showed better occupancy when not using all the threads
int numBlocks = std::min((uint64_t)MAX_BLOCKS_CUDA, (uint64_t)(arrayLength + (uint64_t)(numThreads - 1)) / ((uint64_t)numThreads));
switch (op)
{
case op_elementwise_type::plus:
elementwiseOperationOutOfPlace_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, add_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::minus:
elementwiseOperationOutOfPlace_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, sub_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::multiply:
elementwiseOperationOutOfPlace_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, mul_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::divide:
elementwiseOperationOutOfPlace_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, div_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::compound_plus:
elementwiseOperationOutOfPlace_compund_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, add_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::compound_multiply:
elementwiseOperationOutOfPlace_compund_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, mul_func<T>()); HANDLE_ERROR_KERNEL;
break;
case op_elementwise_type::minus_positive:
elementwiseOperationOutOfPlace_kernel << <numBlocks, numThreads >> > (C, A, B, arrayLength, sub_pos_func<T>()); HANDLE_ERROR_KERNEL;
break;
default:
cout << "ERROR: elementwiseOperationInPlace: operation not supported" << endl;
}
}
//======================================================================
template<class T>
T reductionOperation(const T* A, std::uint64_t arrayLength, op_reduction_type op)
{
const int numThreads = THREADS_CUDA_FOR_REDUCTION;//profiling showed better occupancy when not using all the threads
const int numBlocks = std::min((uint64_t)MAX_BLOCKS_CUDA, (uint64_t)(arrayLength + (uint64_t)(numThreads - 1)) / ((uint64_t)numThreads));
const uint64_t chunkSize = ((uint64_t)(numThreads)) * ((uint64_t)(numBlocks));
//allocate temporary memory to finish the reduction on the CPU
T* reduction_CPU;
T* reduction_GPU;
HANDLE_ERROR(cudaMalloc((void**)&(reduction_GPU), numBlocks * sizeof(T)));
HANDLE_ERROR(cudaMallocHost((void**)&reduction_CPU, numBlocks * sizeof(T))); // host pinned for faster transfers
//initialize result
T finalVal;
switch (op)
{
case op_reduction_type::add:
finalVal = 0;
break;
case op_reduction_type::max_elem:
finalVal = numeric_limits<T>::min();
break;
case op_reduction_type::min_elem:
finalVal = numeric_limits<T>::max();
break;
default:
cout << "ERROR: reductionOperation: operation not supported" << endl;
}
//main loop
std::uint64_t offset = 0;
std::uint64_t length, arrayLengthOrig = arrayLength;
while (offset < arrayLengthOrig)
{
const T* ptr = &(A[offset]);
length = min(arrayLength, chunkSize);
switch (op)
{
case op_reduction_type::add:
reductionOperation_kernel << <numBlocks, numThreads >> >(ptr, reduction_GPU, length, add_func<T>(), T(0)); HANDLE_ERROR_KERNEL;
HANDLE_ERROR(cudaMemcpy(reduction_CPU, reduction_GPU, numBlocks * sizeof(T), cudaMemcpyDeviceToHost));
for (int ii = 0; ii < numBlocks; ii++)
finalVal += reduction_CPU[ii];
break;
case op_reduction_type::max_elem:
// Should the default value here really be numeric_limits<T>::min() ?
// Why not -1.0*numeric_limits<T>::infinity() , which is the identity value under the max(x,y) operation?
// Or maybe numeric_limits<T>::lowest(), which is a negative number with a large absolute value?
// (numeric_limits<T>::min() is a positive number with a small absolute value...)
// Or maybe just make the caller supply the default value explicitly?
reductionOperation_kernel << <numBlocks, numThreads >> >(ptr, reduction_GPU, length, max_func<T>(), numeric_limits<T>::min()); HANDLE_ERROR_KERNEL;
HANDLE_ERROR(cudaMemcpy(reduction_CPU, reduction_GPU, numBlocks * sizeof(T), cudaMemcpyDeviceToHost));
for (int ii = 0; ii < numBlocks; ii++)
finalVal = max(reduction_CPU[ii], finalVal);
break;
case op_reduction_type::min_elem:
// Should the default value here really be numeric_limits<T>::max() ? Why not numeric_limits<T>::infinity() , which is the identity value under the min(x,y) operation?
reductionOperation_kernel << <numBlocks, numThreads >> >(ptr, reduction_GPU, length, min_func<T>(), numeric_limits<T>::max()); HANDLE_ERROR_KERNEL;
HANDLE_ERROR(cudaMemcpy(reduction_CPU, reduction_GPU, numBlocks * sizeof(T), cudaMemcpyDeviceToHost));
for (int ii = 0; ii < numBlocks; ii++)
finalVal = min(reduction_CPU[ii], finalVal);
break;
default:
cout << "ERROR: reductionOperation: operation not supported" << endl;
}
offset += chunkSize;
arrayLength -= chunkSize;
}
//release memory
HANDLE_ERROR(cudaFree(reduction_GPU));
HANDLE_ERROR(cudaFreeHost(reduction_CPU));
return finalVal;
}
//==================================================================
template<class T>
T* allocateMem_GPU(size_t numElem)
{
T* ptr;
HANDLE_ERROR(cudaMalloc((void**)&(ptr), numElem * sizeof(T)));
return ptr;
};
template<class T>
void deallocateMem_GPU(T* ptr)
{
if ( ptr != NULL)
HANDLE_ERROR(cudaFree(ptr));//you still need to set the pointer to NULL
};
template<class T>
void copy_GPU_to_CPU(T* ptr_CPU, const T* ptr_CUDA, size_t numElem)
{
HANDLE_ERROR(cudaMemcpy(ptr_CPU, ptr_CUDA, numElem * sizeof(T), cudaMemcpyDeviceToHost));
}
template<class T>
void copy_CPU_to_GPU(const T* ptr_CPU, T* ptr_CUDA, size_t numElem)
{
HANDLE_ERROR(cudaMemcpy(ptr_CUDA, ptr_CPU, numElem * sizeof(T), cudaMemcpyHostToDevice));
}
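//======================================================================
//Illustrative usage sketch (added for documentation; not part of the original API).
//It shows how the host-side wrappers above are typically chained together. The buffer
//size and the function name exampleUsage_commonCUDA are hypothetical.
static void exampleUsage_commonCUDA()
{
const std::uint64_t n = 1024;
float host[1024];
for (std::uint64_t i = 0; i < n; i++)
host[i] = (float)i;
//allocate device buffers and upload the same data twice
float* A = allocateMem_GPU<float>(n);
float* B = allocateMem_GPU<float>(n);
copy_CPU_to_GPU<float>(host, A, n);
copy_CPU_to_GPU<float>(host, B, n);
//A[i] = (A[i] + B[i]) * 2
elementwiseOperationInPlace<float>(A, B, n, op_elementwise_type::plus);
elementwiseOperationInPlace<float>(A, 2.0f, n, op_elementwise_type::multiply);
//reduce on the GPU (partial sums per block) and finish on the CPU
float total = reductionOperation<float>(A, n, op_reduction_type::add);
cout << "sum = " << total << endl;
//download result and release device memory
copy_GPU_to_CPU<float>(host, A, n);
deallocateMem_GPU<float>(A);
deallocateMem_GPU<float>(B);
}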
//======================================================================
//instantiate templates
template void elementwiseOperationInPlace<std::uint8_t>(std::uint8_t* A, const std::uint8_t* B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationInPlace<std::uint16_t>(std::uint16_t* A, const std::uint16_t* B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationInPlace<float>(float* A, const float* B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationInPlace<std::uint8_t>(std::uint8_t* A, const std::uint8_t B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationInPlace<std::uint16_t>(std::uint16_t* A, const std::uint16_t B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationInPlace<float>(float* A, const float B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationOutOfPlace<float>(float* C, const float* A, const float* B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationOutOfPlace<std::uint16_t>(std::uint16_t* C, const std::uint16_t* A, const std::uint16_t* B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationOutOfPlace<std::uint8_t>(std::uint8_t* C, const std::uint8_t* A, const std::uint8_t* B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationOutOfPlace<float>(float* C, const float A, const float* B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationOutOfPlace<std::uint16_t>(std::uint16_t* C, const std::uint16_t A, const std::uint16_t* B, std::uint64_t arrayLength, op_elementwise_type op);
template void elementwiseOperationOutOfPlace<std::uint8_t>(std::uint8_t* C, const std::uint8_t A, const std::uint8_t* B, std::uint64_t arrayLength, op_elementwise_type op);
template float reductionOperation<float>(const float* A, std::uint64_t arrayLength, op_reduction_type op);
template std::uint8_t reductionOperation<std::uint8_t>(const std::uint8_t* A, std::uint64_t arrayLength, op_reduction_type op);
template std::uint16_t reductionOperation<std::uint16_t>(const std::uint16_t* A, std::uint64_t arrayLength, op_reduction_type op);
template double reductionOperation<double>(const double* A, std::uint64_t arrayLength, op_reduction_type op);
template void deallocateMem_GPU<float>(float* ptr);
template void deallocateMem_GPU<std::uint8_t>(std::uint8_t* ptr);
template void deallocateMem_GPU<std::uint16_t>(std::uint16_t* ptr);
template float* allocateMem_GPU<float>(size_t numElem);
template std::uint8_t* allocateMem_GPU<std::uint8_t>(size_t numElem);
template std::uint16_t* allocateMem_GPU<std::uint16_t>(size_t numElem);
template void copy_GPU_to_CPU<float>(float* ptr_CPU, const float* ptr_CUDA, size_t numElem);
template void copy_GPU_to_CPU<std::uint8_t>(std::uint8_t* ptr_CPU,const std::uint8_t* ptr_CUDA, size_t numElem);
template void copy_GPU_to_CPU<std::uint16_t>(std::uint16_t* ptr_CPU, const std::uint16_t* ptr_CUDA, size_t numElem);
template void copy_CPU_to_GPU<float>(const float* ptr_CPU, float* ptr_CUDA, size_t numElem);
template void copy_CPU_to_GPU<std::uint8_t>(const std::uint8_t* ptr_CPU, std::uint8_t* ptr_CUDA, size_t numElem);
template void copy_CPU_to_GPU<std::uint16_t>(const std::uint16_t* ptr_CPU, std::uint16_t* ptr_CUDA, size_t numElem); |
36cd937df9e8e7ec974669ed7ce672f61cacc75b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cuda_device_runtime_api.h>
#include "device_launch_parameters.h"
#include "kernels.h"
#include "tests.h"
#include <stdio.h>
#include "const.h"
// some defines to avoid repetition
#define IF_LAST if(id == (warpSize - 1))
#define WARP_OPERATION_DOWN(OP, NAME) \
__inline__ __device__ int NAME(int val){ \
for (int mask = WARP_SIZE/2; mask > 0; mask /= 2)\
val OP __shfl_xor(val, mask);\
return val; \
}\
WARP_OPERATION_DOWN(|=, orWithinWarp);
__inline__ __device__ int localScan(int val, int id){
for(int i = 1; i < 32; i<<=1){
int ret = __shfl_up(val, i);
val += id >= i ? ret : 0;
}
return val;
}
__inline__ __device__ void markEndWordTypes(int w, int* end, int id){
IF_LAST{
end[threadIdx.y] = w;
}
}
__inline__ __device__ void writeEndingSize(int id, int* lengths, int size){
IF_LAST{
lengths[threadIdx.y] = size;
}
}
namespace regular_kernels{
/*
* Device function performing compression
*
* Parameters:
* data - device pointer to data to be compressed
* output - device pointer to the output array
* blockCounts - device pointer to an array with block sizes
* dataSize - input data size in integers
*/
template<class T>
__global__ void gpu_compressData(unsigned int* data, unsigned int* output, T* blockCounts, int dataSize) {
// count of words for every warp
__shared__ int counts[32];
// length of the last word in a warp
__shared__ int endLengths[32];
// type of the last word in a warp
__shared__ int endings[32];
// type of the first word in a warp
__shared__ int beginnings[32];
// array indicating whether the last thread of a warp has been merged
__shared__ bool merged[32];
// -- Prepare initial variables and read data --
// get thread id
int id = threadIdx.x;
int id_global = blockIdx.x * (31*32) + threadIdx.y *31 + id;
unsigned int word = 0;
if(id_global > dataSize) return;
// retrieve word, only first 31 threads
if (id < WARP_SIZE - 1) {
word = data[id_global];
}
// divide words into 31bit parts
// gets 31 - id bits from one lane above
// and id bits from own lane
//word = (__shfl_down(word, 1) & (ONES31 >> id)) << id | (word & TOP31ONES) >> (32 - id);
word = ONES31 & ((__shfl_up(word, 1) >> (32 - id)) | (word << id));
// -- Recognize and mark words --
// word info variables
int ones = 0;
int zeros = 0;
int literals = 0;
int type;
// mark word types for warp
// detect words with zeros and words with ones
// is a zero fill word
if (word == ZEROS) {
zeros |= 1 << id;
type = WORD_ZEROS;
// last word in a warp marks word as zeros
markEndWordTypes(WORD_ZEROS, endings, id);
}
// is a one fill word
else if (word == ONES31) {
ones |= 1 << id;
type = WORD_ONES;
// last word in a warp marks word as ones
markEndWordTypes(WORD_ONES, endings, id);
}
else
{
type = WORD_LITERAL;
// last word in a warp marks word as literal
markEndWordTypes(WORD_LITERAL, endings, id);
}
// exchange word information within the warp
zeros = orWithinWarp(zeros);
ones = orWithinWarp(ones);
literals = ~(zeros | ones);
// send complete information to other threads
// if (id == WARP_LEADER) {
// zeros = __shfl(zeros, 0);
// ones = __shfl(ones, 0);
// literals = __shfl(literals, 0);
// }
int n = 0x3 << id;
int flags = BIT31;
bool idle = true;
// if is not last
if (id < 31) {
int res = 1 << id;
if (((n & zeros) == res || (n & ones) == res || (literals & (1 << id)) > 0)) {
// detect endings of sequences of words of the same type and mark them
flags |= 1 << id;
idle = false;
}
}
else{
idle = false;
}
// -- Calculate block size --
// exchange endings
flags = orWithinWarp(flags);
int blockSize = 1;
// index within warp
int index = __popc(((1<<id) - 1) & flags);
// if first word in block, write beginning
if(index == 0){
beginnings[threadIdx.y] = type;
}
// calculate the number of words within a compressed word
if (!idle) {
for (int i = id-1; i >= 0; i--) {
if ((flags & (1 << i)) > 0) {
break;
}
blockSize++;
}
if (word == ONES31) {
// word = BIT3130;
writeEndingSize(id, endLengths, blockSize);
}
else if (word == ZEROS) {
// word = BIT31;
writeEndingSize(id, endLengths, blockSize);
}
else{
writeEndingSize(id, endLengths, 0);
}
}
// last thread calculates the number of words and writes it to the shared array
if(id == WARP_LEADER){
counts[threadIdx.y] = __popc(flags);
}
// sync all threads within block
__syncthreads();
// -- Merging warps --
// the first warp scans the array and gets total block word size
// then calculates offset
int mergeShift = 0;
if(threadIdx.y == BLOCK_LEADER){
merged[id] = false;
int count = counts[id];
// used to not check the same condition twice
bool satisfiedMergingConditions = false;
// only execute if the current word will not be merged into another one
if((id == warpSize - 1) || (endings[id] != beginnings[id+1]) || endings[id] == WORD_LITERAL || counts[id] > 1){
int i = 1;
satisfiedMergingConditions = true;
int bonus = 0;
// calculate merge shifts
while(true){
// check for warps with length 1 and the same word as our beginning
if(i < id && counts[id - i] == 1 && beginnings[id] == endings[id-i] && beginnings[id] != WORD_LITERAL){
mergeShift++;
merged[id - i] = true;
bonus += endLengths[id - i];
i++;
}
// check for warps that can be partially merged - with the same ending as our beginning
else if(i <= id && beginnings[id] == endings[id - i] && beginnings[id] != WORD_LITERAL){
mergeShift++;
merged[id - i] = true;
bonus += endLengths[id - i];
i++;
break;
}
else break;
}
endLengths[id] = bonus;
}
if(!satisfiedMergingConditions){
endLengths[id] = 0;
}
// let every thread get the shift for its warp
mergeShift = localScan(mergeShift, id);
int blockOffset = localScan(count, id);
// get the offset for the current warp within the block and store it in counts
counts[id] = blockOffset - count - mergeShift;
}
__syncthreads();
IF_LAST{
idle = merged[threadIdx.y];
}
// -- Writing final output for the block --
// get global offset for warp and warp offset
if(!idle){
// first word in a warp gets a bonus
int bonus = index == 0 ? endLengths[threadIdx.y] : 0;
index += counts[threadIdx.y];
if (word == ONES31) {
word = BIT3130 | (blockSize + bonus);
}
else if (word == ZEROS) {
word = BIT31 | (blockSize + bonus);
}
// if it's the last thread in block - either processing last word or the last thread of the last warp
if((id == (warpSize - 1) && threadIdx.y == (blockDim.y - 1)) || id_global == (dataSize - 1)){
blockCounts[blockIdx.x] = index + 1;
}
output[index + (blockDim.x * blockDim.y) * blockIdx.x] = word;
}
}
template __global__ void gpu_compressData<unsigned long long int>(unsigned int* data, unsigned int* output, unsigned long long int* blockCounts, int dataSize);
template __global__ void gpu_compressData<unsigned int>(unsigned int* data, unsigned int* output, unsigned int* blockCounts, int dataSize);
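/*
* Illustrative launch sketch (an assumption added for documentation, not part of the original
* file): gpu_compressData expects a 32x32 thread block; every block consumes 31*32 = 992 input
* words and writes at most 32*32 = 1024 (possibly gapped) words starting at blockIdx.x * 1024.
* The helper name demoLaunch_compressData is hypothetical.
*/
static void demoLaunch_compressData(unsigned int* data_gpu, unsigned int* output_gpu,
unsigned int* blockCounts_gpu, int dataSize)
{
const int wordsPerBlock = 31 * 32;
int blocks = (dataSize + wordsPerBlock - 1) / wordsPerBlock;
hipLaunchKernelGGL(gpu_compressData<unsigned int>, dim3(blocks), dim3(32, 32), 0, 0,
data_gpu, output_gpu, blockCounts_gpu, dataSize);
// after an exclusive scan of blockCounts_gpu, gpu_moveData (below) compacts the gapped
// per-block output into a contiguous compressed stream
}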
/*
* Device function moving data from different compressed blocks and removing gaps
*
* Parameters:
* initialOutput - device pointer to the compressed data with gaps
* finalOutput - device pointer to the output array
* blockCounts - device pointer to an array with block sizes
*/
template<class T>
__global__ void gpu_moveData(unsigned int* initialOutput, unsigned int* finalOutput, T* blockCounts){
int globalId = blockIdx.x * (blockDim.x * blockDim.y) + threadIdx.y * blockDim.x + threadIdx.x;
unsigned int word = initialOutput[globalId];
if(word == 0) return;
T blockOffset = blockCounts[blockIdx.x];//keep the templated width to avoid truncating offsets for very large arrays
int blockId = threadIdx.x + threadIdx.y * blockDim.x;
finalOutput[blockOffset + blockId] = word;
}
template __global__ void gpu_moveData<unsigned long long int>(unsigned int* initialOutput, unsigned int* finalOutput, unsigned long long int* blockCounts);
template __global__ void gpu_moveData<unsigned int>(unsigned int* initialOutput, unsigned int* finalOutput, unsigned int* blockCounts);
/*
* Device function moving data from different compressed blocks and removing gaps
*
* Parameters:
* data_gpu - device pointer to the compressed data
* counts_gpu - device pointer to an array storing sizes of blocks
* dataSize - input data size in integers
*/
template<class T>
__global__ void gpu_getCounts(unsigned int* data_gpu, T* counts_gpu, T dataSize){
// get global id
int globalId = blockIdx.x * (blockDim.x * blockDim.y) + blockDim.x * threadIdx.y + threadIdx.x;
// is within the data range
if(globalId < dataSize){
// get word
unsigned int word = data_gpu[globalId];
if((BIT31 & word) > 0){
// if filler word - get count
int count = word & (BIT30 - 1);
counts_gpu[globalId] = count;
}
else{
counts_gpu[globalId] = 1;
}
}
}
template __global__ void gpu_getCounts<unsigned long long int>(unsigned int* data_gpu, unsigned long long int* counts_gpu, unsigned long long int dataSize);
template __global__ void gpu_getCounts<unsigned int>(unsigned int* data_gpu, unsigned int* counts_gpu, unsigned int dataSize);
/*
* Device function performing decompression
*
* Parameters:
* data_gpu - device pointer to the compressed data
* counts_gpu - device pointer to an array storing sizes of blocks
* result_gpu - device pointer to the output array
* dataSize - input data size in integers
*/
template<class T>
__global__ void gpu_decompressWords(unsigned int* data_gpu, T* counts_gpu, unsigned int* result_gpu, T dataSize){
// get global id
unsigned long long int globalId = blockIdx.x * (blockDim.x * blockDim.y) + blockDim.x * threadIdx.y + threadIdx.x;
// out of range
if(globalId >= dataSize) return;
// read word
unsigned int word = data_gpu[globalId];
// read offset for block
unsigned long long int offset = counts_gpu[globalId];
//printf("id : %d offset: %d \n", globalId, offset);
// is filler word
if((BIT31 & word) > 0){
int count = word & (BIT30 - 1);
unsigned int filler;
// assign correct filler word
if((BIT3130 & word) == BIT3130){
// is ones
filler = ONES31;
}
else{
// zeros
filler = 0;
}
// fill array
for(int i = 0; i < count; i++){
result_gpu[offset + i] = filler;
}
}
// is literal word
else{
result_gpu[offset] = word;
}
// printf("%d", offset);
}
template __global__ void gpu_decompressWords<unsigned long long int>(unsigned int* data_gpu, unsigned long long int* counts_gpu, unsigned int* result_gpu, unsigned long long int dataSize);
template __global__ void gpu_decompressWords<unsigned int>(unsigned int* data_gpu, unsigned int* counts_gpu, unsigned int* result_gpu, unsigned int dataSize);
/*
* Device function converting 32 31-bit words into 31 32-bit ones
*
* Parameters:
* result_gpu - device pointer to the decompressed data
* finalOutput_gpu - device pointer to the final output array
* dataSize - input data size in integers
*/
template<class T>
__global__ void gpu_mergeWords(unsigned int* result_gpu, unsigned int* finalOutput_gpu, T dataSize){
// get global id
int globalId = blockIdx.x * (blockDim.x * blockDim.y) + blockDim.x * threadIdx.y + threadIdx.x;
int id = threadIdx.x;
if(globalId >= dataSize) return;
unsigned int word = result_gpu[globalId];
word = (__shfl_down(word, 1) << (warpSize - id - 1)) | (word >> id);
// first 31 threads save a word each
if(id < warpSize - 1){
finalOutput_gpu[blockIdx.x * 31*32 + threadIdx.y * 31 + id] = word;
// if(blockIdx.x * 31*32 + threadIdx.y * 31 + id == 31){
// printf("thread id %d", globalId);
// }
}
}
template __global__ void gpu_mergeWords<unsigned long long int>(unsigned int* result_gpu, unsigned int* finalOutput_gpu, unsigned long long int dataSize);
template __global__ void gpu_mergeWords<unsigned int>(unsigned int* result_gpu, unsigned int* finalOutput_gpu, unsigned int dataSize);
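/*
* Illustrative end-to-end decompression sketch (an assumption added for documentation, not part
* of the original file): it chains gpu_getCounts, an exclusive scan (done here on the host for
* simplicity; a GPU scan would normally be used), gpu_decompressWords and gpu_mergeWords.
* The name demoDecompressPipeline is hypothetical; finalOutput_gpu must be able to hold
* (total / 32) * 31 uncompressed 32-bit words.
*/
static void demoDecompressPipeline(unsigned int* compressed_gpu, unsigned int compressedSize, unsigned int* finalOutput_gpu)
{
const unsigned int threadsPerBlock = 32 * 32;
unsigned int blocks = (compressedSize + threadsPerBlock - 1) / threadsPerBlock;
// 1. per compressed word: how many 31-bit words it expands to
unsigned int* counts_gpu;
hipMalloc((void**)&counts_gpu, compressedSize * sizeof(unsigned int));
hipLaunchKernelGGL(gpu_getCounts<unsigned int>, dim3(blocks), dim3(32, 32), 0, 0, compressed_gpu, counts_gpu, compressedSize);
// 2. exclusive scan of the counts -> output offset of every compressed word
unsigned int* counts_cpu = new unsigned int[compressedSize];
hipMemcpy(counts_cpu, counts_gpu, compressedSize * sizeof(unsigned int), hipMemcpyDeviceToHost);
unsigned int total = 0;
for (unsigned int i = 0; i < compressedSize; i++)
{
unsigned int c = counts_cpu[i];
counts_cpu[i] = total;
total += c;
}
hipMemcpy(counts_gpu, counts_cpu, compressedSize * sizeof(unsigned int), hipMemcpyHostToDevice);
// 3. expand fill and literal words into 31-bit words
unsigned int* result_gpu;
hipMalloc((void**)&result_gpu, total * sizeof(unsigned int));
hipLaunchKernelGGL(gpu_decompressWords<unsigned int>, dim3(blocks), dim3(32, 32), 0, 0, compressed_gpu, counts_gpu, result_gpu, compressedSize);
// 4. merge every 32 31-bit words back into 31 32-bit words
unsigned int mergeBlocks = (total + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(gpu_mergeWords<unsigned int>, dim3(mergeBlocks), dim3(32, 32), 0, 0, result_gpu, finalOutput_gpu, total);
hipFree(result_gpu);
hipFree(counts_gpu);
delete[] counts_cpu;
}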
}
namespace no_sorting {
template<class T>
__global__ void compressData(
unsigned int* data,
unsigned int* output,
T* blockCounts,
T* orderingArray,
T* sizeCounter_gpu,
T dataSize) {
// count of words for every warp
__shared__ int counts[32];
// length of the last word in a warp
__shared__ int endLengths[32];
// type of the last word in a warp
__shared__ int endings[32];
// type of the first word in a warp
__shared__ int beginnings[32];
// array indicating whether the last thread of a warp has been merged
__shared__ bool merged[32];
// real block offset using atomic add
__shared__ T outputOffset;
// get thread id
int id = threadIdx.x;
T id_global = blockIdx.x * (31*32) + threadIdx.y *31 + id;
unsigned int word = 0;
// retrieve word, only first 31 threads
if(id_global > dataSize) return;
if (id < WARP_SIZE - 1) {
word = data[id_global];
}
// divide words into 31bit parts
// gets 31 - id bits from one lane above
// and id bits from own lane
//word = (__shfl_down(word, 1) & (ONES31 >> id)) << id | (word & TOP31ONES) >> (32 - id);
word = ONES31 & ((__shfl_up(word, 1) >> (32 - id)) | (word << id));
// word info variables
int ones = 0;
int zeros = 0;
int literals = 0;
int type;
// mark word types for warp
// detect words with zeros and words with ones
// is a zero fill word
if (word == ZEROS) {
zeros |= 1 << id;
type = WORD_ZEROS;
markEndWordTypes(WORD_ZEROS, endings, id);
}
// is a one fill word
else if (word == ONES31) {
ones |= 1 << id;
type = WORD_ONES;
markEndWordTypes(WORD_ONES, endings, id);
}
else
{
type = WORD_LITERAL;
markEndWordTypes(WORD_LITERAL, endings, id);
}
// exchange word information within the warp
zeros = orWithinWarp(zeros);
ones = orWithinWarp(ones);
literals = ~(zeros | ones);
// send complete information to other threads
// if (id == WARP_LEADER) {
// zeros = __shfl(zeros, 0);
// ones = __shfl(ones, 0);
// literals = __shfl(literals, 0);
// }
int n = 0x3 << id;
int flags = BIT31;
bool idle = true;
// if is not last
if (id < 31) {
int res = 1 << id;
if (((n & zeros) == res || (n & ones) == res || (literals & (1 << id)) > 0)) {
// mark endings
flags |= 1 << id;
idle = false;
}
}
else{
idle = false;
}
// exchange endings
flags = orWithinWarp(flags);
int blockSize = 1;
// index within warp
int index = __popc(((1<<id) - 1) & flags);
// if first word in block, write beginning
if(index == 0){
beginnings[threadIdx.y] = type;
}
// calculate the number of words within a block
if (!idle) {
for (int i = id-1; i >= 0; i--) {
if ((flags & (1 << i)) > 0) {
break;
}
blockSize++;
}
if (word == ONES31) {
// word = BIT3130;
writeEndingSize(id, endLengths, blockSize);
}
else if (word == ZEROS) {
// word = BIT31;
writeEndingSize(id, endLengths, blockSize);
}
else{
writeEndingSize(id, endLengths, 0);
}
}
// last thread calculates the number of words and writes it to the shared array
if(id == WARP_LEADER){
counts[threadIdx.y] = __popc(flags);
}
// sync all threads within block
__syncthreads();
// the first warp scans the array and gets total block word size
// then calculates offset
int mergeShift = 0;
if(threadIdx.y == BLOCK_LEADER){
merged[id] = false;
int count = counts[id];
// used to not check the same condition twice
bool satisfiedMergingConditions = false;
// only execute if it's not going to get merged with something else
if((id == warpSize - 1) || (endings[id] != beginnings[id+1]) || endings[id] == WORD_LITERAL || counts[id] > 1){
int i = 1;
satisfiedMergingConditions = true;
int bonus = 0;
// calculate merge shifts
while(true){
// has 1 length and words match
if(i < id && counts[id - i] == 1 && beginnings[id] == endings[id-i] && beginnings[id] != WORD_LITERAL){
mergeShift++;
merged[id - i] = true;
bonus += endLengths[id - i];
i++;
}
else if(i <= id && beginnings[id] == endings[id - i] && beginnings[id] != WORD_LITERAL){
mergeShift++;
merged[id - i] = true;
bonus += endLengths[id - i];
i++;
break;
}
else break;
}
endLengths[id] = bonus;
}
if(!satisfiedMergingConditions){
endLengths[id] = 0;
}
mergeShift = localScan(mergeShift, id);
int blockOffset = localScan(count, id);
counts[id] = blockOffset - count - mergeShift;
}
__syncthreads();
IF_LAST{
idle = merged[threadIdx.y];
}
int bonus = 0;
// get global offset for warp and warp offset
if(!idle){
// first word in a warp gets a bonus
bonus = index == 0 ? endLengths[threadIdx.y] : 0;
index += counts[threadIdx.y];
if (word == ONES31) {
word = BIT3130 | (blockSize + bonus);
}
else if (word == ZEROS) {
word = BIT31 | (blockSize + bonus);
}
// if it's the last thread in block - either processing last word or the last thread of the last warp
if((id == (warpSize - 1) && threadIdx.y == (blockDim.y - 1))){
blockCounts[blockIdx.x] = index + 1;
outputOffset = atomicAdd(sizeCounter_gpu, (index + 1));
}
}
__syncthreads();
if(!idle){
// assign offset
orderingArray[blockIdx.x] = outputOffset;
output[index + outputOffset] = word;
}
}
template __global__ void compressData<unsigned long long int>(
unsigned int* data,
unsigned int* output,
unsigned long long int* blockCounts,
unsigned long long int* orderingArray,
unsigned long long int* sizeCounter_gpu,
unsigned long long int dataSize);
template __global__ void compressData<unsigned int>(
unsigned int* data,
unsigned int* output,
unsigned int* blockCounts,
unsigned int* orderingArray,
unsigned int* sizeCounter_gpu,
unsigned int dataSize);
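/*
* Illustrative launch sketch (an assumption added for documentation, not part of the original
* file): compressData uses the same 32x32 block geometry as regular_kernels::gpu_compressData,
* but claims each block's output offset with atomicAdd on sizeCounter_gpu, so that counter must
* be zero before the launch. The helper name demoLaunch_compressData_noSort is hypothetical.
*/
static void demoLaunch_compressData_noSort(unsigned int* data_gpu, unsigned int* output_gpu,
unsigned int* blockCounts_gpu, unsigned int* orderingArray_gpu, unsigned int* sizeCounter_gpu,
unsigned int dataSize)
{
hipMemset(sizeCounter_gpu, 0, sizeof(unsigned int));//atomicAdd starts from zero
const unsigned int wordsPerBlock = 31 * 32;
unsigned int blocks = (dataSize + wordsPerBlock - 1) / wordsPerBlock;
hipLaunchKernelGGL(compressData<unsigned int>, dim3(blocks), dim3(32, 32), 0, 0,
data_gpu, output_gpu, blockCounts_gpu, orderingArray_gpu, sizeCounter_gpu, dataSize);
// afterwards blockCounts_gpu holds the compressed size of every block and orderingArray_gpu
// the offset at which that block was written; decompressWords below consumes exactly that pair
}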
template<class T>
__global__ void getCounts(unsigned int* data_gpu, T* counts_gpu, T dataSize){
// get global id
T globalId = blockIdx.x * (blockDim.x * blockDim.y) + blockDim.x * threadIdx.y + threadIdx.x;
// is within the data range
if(globalId < dataSize){
// get word
unsigned int word = data_gpu[globalId];
if((BIT31 & word) > 0){
// if filler word - get count
int count = word & (BIT30 - 1);
counts_gpu[globalId] = count;
}
else{
counts_gpu[globalId] = 1;
}
}
}
template __global__ void getCounts<unsigned int>(unsigned int* data_gpu, unsigned int* counts_gpu, unsigned int dataSize);
template __global__ void getCounts<unsigned long long int>(unsigned int* data_gpu, unsigned long long int* counts_gpu, unsigned long long int dataSize);
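/*
* Device function performing block-wise decompression (descriptive comment added; wording is
* the editor's reading of the code below, not the original author's)
*
* Parameters:
* data_gpu - device pointer to the compressed data
* result_gpu - device pointer to the output array of 31-bit words
* offsets - device pointer with the start of every compressed block inside data_gpu
* blockSizes - device pointer with the number of compressed words of every block
* blocks - number of compressed blocks
* dataSize - input data size in integers
*
* Launched with one thread block per compressed block; block b writes its 32*32 decompressed
* words starting at output index 32*32*b.
*/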
template<class T>
__global__ void decompressWords(
unsigned int* data_gpu,
unsigned int* result_gpu,
T* offsets,
T* blockSizes,
T blocks,
T dataSize){
__shared__ int blockCounts[32];
__shared__ T blockStart;
__shared__ T blockEnd;
// get global id
unsigned int blockId = blockIdx.x;
int localId = blockDim.x * threadIdx.y + threadIdx.x;
if(localId == 0){
blockStart = offsets[blockId];
blockEnd = blockStart + blockSizes[blockId];
// printf("block : %d ", blockId);
}
__syncthreads();
if(threadIdx.y == 0){
blockCounts[threadIdx.x] = 0;
}
T globalId = blockStart + localId;
if(globalId < blockStart || globalId >= blockEnd) return;
unsigned int word = data_gpu[globalId];
T offset = 32*32*blockId;
// out of range
int count = (BIT31 & word) > 0 ? (word & (BIT30 - 1)) : 1;
// is not the last working warp in the block
int warpOffset = localScan(count, threadIdx.x) - count;
if(warpOffset >= 1024){
printf("warp offset overboard %d", warpOffset);
}
if(blockStart + threadIdx.y * 32 + 32 < blockEnd){
if(threadIdx.x == warpSize - 1){
blockCounts[threadIdx.y] = warpOffset + count;
if((warpOffset + count) >= 1024){
printf("danger: %d", warpOffset + count);
}
}
}
__syncthreads();
if(blockStart + threadIdx.y * 32 + 32 < blockEnd){
if(threadIdx.y == 0){
int val = blockCounts[threadIdx.x];
if(val >= 1024){
printf("warp offset: %d", val);
}
int o = localScan(val, threadIdx.x);
blockCounts[threadIdx.x] = o - val;
if(o-val > 1024*1024){
printf("val: %d, o: %d, warpOffset: %d, count: %d \n", val, o, warpOffset, count);
}
}
}
__syncthreads();
offset += blockCounts[threadIdx.y] + warpOffset;
if((BIT31 & word) > 0){
unsigned int filler;
// assign correct filler word
if((BIT3130 & word) == BIT3130){
// is ones
filler = ONES31;
}
else{
// zeros
filler = 0;
}
// fill array
for(int i = 0; i < count; i++){
result_gpu[offset + i] = filler;
}
}
else{
result_gpu[offset] = word;
}
// printf("%d", offset);
}
template __global__ void decompressWords<unsigned int>(
unsigned int* data_gpu,
unsigned int* result_gpu,
unsigned int* offsets,
unsigned int* blockSizes,
unsigned int blocks,
unsigned int dataSize);
template __global__ void decompressWords<unsigned long long int>(
unsigned int* data_gpu,
unsigned int* result_gpu,
unsigned long long int* offsets,
unsigned long long int* blockSizes,
unsigned long long int blocks,
unsigned long long int dataSize);
template<class T>
__global__ void mergeWords(unsigned int* result_gpu, unsigned int* finalOutput_gpu, T dataSize){
// get global id
T globalId = blockIdx.x * (blockDim.x * blockDim.y) + blockDim.x * threadIdx.y + threadIdx.x;
int id = threadIdx.x;
if(globalId >= dataSize) return;
unsigned int word = result_gpu[globalId];
word = (__shfl_down(word, 1) << (warpSize - id - 1)) | (word >> id);
// first 31 threads save a word each
if(id < warpSize - 1){
finalOutput_gpu[blockIdx.x * 31*32 + threadIdx.y * 31 + id] = word;
// if(blockIdx.x * 31*32 + threadIdx.y * 31 + id == 31){
// printf("thread id %d", globalId);
// }
}
}
template __global__ void mergeWords<unsigned int>(unsigned int* result_gpu, unsigned int* finalOutput_gpu, unsigned int dataSize);
template __global__ void mergeWords<unsigned long long int>(unsigned int* result_gpu, unsigned int* finalOutput_gpu, unsigned long long int dataSize);
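/*
* Device kernel reordering compressed blocks (descriptive comment added; wording is the
* editor's reading of the code below, not the original author's)
*
* Parameters:
* blockSizes - device pointer with per-block compressed sizes (accepted but not read here)
* offsets - device pointer with the current start of every block inside data (assumed ascending)
* outputOffsets - device pointer with the desired start of every block inside output
* blockCount - number of compressed blocks
* data - device pointer to the compressed data in the current layout
* dataSize - number of compressed words
* output - device pointer receiving the reordered compressed data
*
* Every thread locates the block its word belongs to by a linear search over offsets and copies
* the word to the corresponding position in the output layout.
*/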
__global__ void reoderKernel(
unsigned long long int* blockSizes,
unsigned long long int* offsets,
unsigned long long int* outputOffsets,
unsigned long long int blockCount,
unsigned int* data,
unsigned long long int dataSize,
unsigned int* output)
{
unsigned long long int globalId = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
if(globalId >= dataSize) return;
int blockIndex = 0;
unsigned long long int blockOffset = 0;
// find current block
for(int i=0;i<blockCount; i++){
// find max value smaller than globalId
if(offsets[i] <= globalId && offsets[i] >= blockOffset){
blockIndex = i;
blockOffset = offsets[i];
}
else{
break;
}
}
output[outputOffsets[blockIndex] + globalId - blockOffset]= data[globalId];
}
}
| 36cd937df9e8e7ec974669ed7ce672f61cacc75b.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_device_runtime_api.h>
#include "device_launch_parameters.h"
#include "kernels.h"
#include "tests.h"
#include <stdio.h>
#include "const.h"
// some defines to avoid repetition
#define IF_LAST if(id == (warpSize - 1))
#define WARP_OPERATION_DOWN(OP, NAME) \
__inline__ __device__ int NAME(int val){ \
for (int mask = WARP_SIZE/2; mask > 0; mask /= 2)\
val OP __shfl_xor(val, mask);\
return val; \
}\
WARP_OPERATION_DOWN(|=, orWithinWarp);
__inline__ __device__ int localScan(int val, int id){
for(int i = 1; i < 32; i<<=1){
int ret = __shfl_up(val, i);
val += id >= i ? ret : 0;
}
return val;
}
__inline__ __device__ void markEndWordTypes(int w, int* end, int id){
IF_LAST{
end[threadIdx.y] = w;
}
}
__inline__ __device__ void writeEndingSize(int id, int* lengths, int size){
IF_LAST{
lengths[threadIdx.y] = size;
}
}
namespace regular_kernels{
/*
* Device function performing compression
*
* Parameters:
* data - device pointer to data to be compressed
* output - device pointer to the output array
* blockCounts - device pointer to an array with block sizes
* dataSize - input data size in integers
*/
template<class T>
__global__ void gpu_compressData(unsigned int* data, unsigned int* output, T* blockCounts, int dataSize) {
// count of words for every warp
__shared__ int counts[32];
// length of the last word in a warp
__shared__ int endLengths[32];
// type of the last word in a warp
__shared__ int endings[32];
// type of the first word in a warp
__shared__ int beginnings[32];
// array indicating whether the last thread of a warp has been merged
__shared__ bool merged[32];
// -- Prepare initial variables and read data --
// get thread id
int id = threadIdx.x;
int id_global = blockIdx.x * (31*32) + threadIdx.y *31 + id;
unsigned int word = 0;
if(id_global > dataSize) return;
// retrieve word, only first 31 threads
if (id < WARP_SIZE - 1) {
word = data[id_global];
}
// divide words into 31bit parts
// gets 31 - id bits from one lane above
// and id bits from own lane
//word = (__shfl_down(word, 1) & (ONES31 >> id)) << id | (word & TOP31ONES) >> (32 - id);
word = ONES31 & ((__shfl_up(word, 1) >> (32 - id)) | (word << id));
// -- Recognize and mark words --
// word info variables
int ones = 0;
int zeros = 0;
int literals = 0;
int type;
// mark word types for warp
// detect words with zeros and words with ones
// is a zero fill word
if (word == ZEROS) {
zeros |= 1 << id;
type = WORD_ZEROS;
// last word in a warp marks word as zeros
markEndWordTypes(WORD_ZEROS, endings, id);
}
// is a one fill word
else if (word == ONES31) {
ones |= 1 << id;
type = WORD_ONES;
// last word in a warp marks word as ones
markEndWordTypes(WORD_ONES, endings, id);
}
else
{
type = WORD_LITERAL;
// last word in a warp marks word as literal
markEndWordTypes(WORD_LITERAL, endings, id);
}
// exchange word information within the warp
zeros = orWithinWarp(zeros);
ones = orWithinWarp(ones);
literals = ~(zeros | ones);
// send complete information to other threads
// if (id == WARP_LEADER) {
// zeros = __shfl(zeros, 0);
// ones = __shfl(ones, 0);
// literals = __shfl(literals, 0);
// }
int n = 0x3 << id;
int flags = BIT31;
bool idle = true;
// if is not last
if (id < 31) {
int res = 1 << id;
if (((n & zeros) == res || (n & ones) == res || (literals & (1 << id)) > 0)) {
// detect endings of sequences of words of the same type and mark them
flags |= 1 << id;
idle = false;
}
}
else{
idle = false;
}
// -- Calculate block size --
// exchange endings
flags = orWithinWarp(flags);
int blockSize = 1;
// index within warp
int index = __popc(((1<<id) - 1) & flags);
// if first word in block, write beginning
if(index == 0){
beginnings[threadIdx.y] = type;
}
// calculate the number of words within a compressed word
if (!idle) {
for (int i = id-1; i >= 0; i--) {
if ((flags & (1 << i)) > 0) {
break;
}
blockSize++;
}
if (word == ONES31) {
// word = BIT3130;
writeEndingSize(id, endLengths, blockSize);
}
else if (word == ZEROS) {
// word = BIT31;
writeEndingSize(id, endLengths, blockSize);
}
else{
writeEndingSize(id, endLengths, 0);
}
}
// last thread calculates the number of words and writes it to the shared array
if(id == WARP_LEADER){
counts[threadIdx.y] = __popc(flags);
}
// sync all threads within block
__syncthreads();
// -- Merging warps --
// the first warp scans the array and gets total block word size
// then calculates offset
int mergeShift = 0;
if(threadIdx.y == BLOCK_LEADER){
merged[id] = false;
int count = counts[id];
// used to not check the same condition twice
bool satisfiedMergingConditions = false;
// only execute if the current word will not be merged into another one
if((id == warpSize - 1) || (endings[id] != beginnings[id+1]) || endings[id] == WORD_LITERAL || counts[id] > 1){
int i = 1;
satisfiedMergingConditions = true;
int bonus = 0;
// calculate merge shifts
while(true){
// check for warps with length 1 and the same word as our beginning
if(i < id && counts[id - i] == 1 && beginnings[id] == endings[id-i] && beginnings[id] != WORD_LITERAL){
mergeShift++;
merged[id - i] = true;
bonus += endLengths[id - i];
i++;
}
// check for warps that can be partially merged - with the same ending as our beginning
else if(i <= id && beginnings[id] == endings[id - i] && beginnings[id] != WORD_LITERAL){
mergeShift++;
merged[id - i] = true;
bonus += endLengths[id - i];
i++;
break;
}
else break;
}
endLengths[id] = bonus;
}
if(!satisfiedMergingConditions){
endLengths[id] = 0;
}
// let every thread get the shift for its warp
mergeShift = localScan(mergeShift, id);
int blockOffset = localScan(count, id);
// get the offset for the current warp within the block and store it in counts
counts[id] = blockOffset - count - mergeShift;
}
__syncthreads();
IF_LAST{
idle = merged[threadIdx.y];
}
// -- Writing final output for the block --
// get global offset for warp and warp offset
if(!idle){
// first word in a warp gets a bonus
int bonus = index == 0 ? endLengths[threadIdx.y] : 0;
index += counts[threadIdx.y];
if (word == ONES31) {
word = BIT3130 | (blockSize + bonus);
}
else if (word == ZEROS) {
word = BIT31 | (blockSize + bonus);
}
// if it's the last thread in block - either processing last word or the last thread of the last warp
if((id == (warpSize - 1) && threadIdx.y == (blockDim.y - 1)) || id_global == (dataSize - 1)){
blockCounts[blockIdx.x] = index + 1;
}
output[index + (blockDim.x * blockDim.y) * blockIdx.x] = word;
}
}
template __global__ void gpu_compressData<unsigned long long int>(unsigned int* data, unsigned int* output, unsigned long long int* blockCounts, int dataSize);
template __global__ void gpu_compressData<unsigned int>(unsigned int* data, unsigned int* output, unsigned int* blockCounts, int dataSize);
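/*
* Illustrative launch sketch (an assumption added for documentation, not part of the original
* file): gpu_compressData expects a 32x32 thread block; every block consumes 31*32 = 992 input
* words and writes at most 32*32 = 1024 (possibly gapped) words starting at blockIdx.x * 1024.
* The helper name demoLaunch_compressData is hypothetical.
*/
static void demoLaunch_compressData(unsigned int* data_gpu, unsigned int* output_gpu,
unsigned int* blockCounts_gpu, int dataSize)
{
const int wordsPerBlock = 31 * 32;
int blocks = (dataSize + wordsPerBlock - 1) / wordsPerBlock;
gpu_compressData<unsigned int><<<blocks, dim3(32, 32)>>>(data_gpu, output_gpu, blockCounts_gpu, dataSize);
// after an exclusive scan of blockCounts_gpu, gpu_moveData (below) compacts the gapped
// per-block output into a contiguous compressed stream
}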
/*
* Device function moving data from different compressed blocks and removing gaps
*
* Parameters:
* initialOutput - device pointer to the compressed data with gaps
* finalOutput - device pointer to the output array
* blockCounts - device pointer to an array with block sizes
*/
template<class T>
__global__ void gpu_moveData(unsigned int* initialOutput, unsigned int* finalOutput, T* blockCounts){
int globalId = blockIdx.x * (blockDim.x * blockDim.y) + threadIdx.y * blockDim.x + threadIdx.x;
unsigned int word = initialOutput[globalId];
if(word == 0) return;
T blockOffset = blockCounts[blockIdx.x];//keep the templated width to avoid truncating offsets for very large arrays
int blockId = threadIdx.x + threadIdx.y * blockDim.x;
finalOutput[blockOffset + blockId] = word;
}
template __global__ void gpu_moveData<unsigned long long int>(unsigned int* initialOutput, unsigned int* finalOutput, unsigned long long int* blockCounts);
template __global__ void gpu_moveData<unsigned int>(unsigned int* initialOutput, unsigned int* finalOutput, unsigned int* blockCounts);
/*
* Device function moving data from different compressed blocks and removing gaps
*
* Parameters:
* data_gpu - device pointer to the compressed data
* counts_gpu - device pointer to an array storing sizes of blocks
* dataSize - input data size in integers
*/
template<class T>
__global__ void gpu_getCounts(unsigned int* data_gpu, T* counts_gpu, T dataSize){
// get global id
int globalId = blockIdx.x * (blockDim.x * blockDim.y) + blockDim.x * threadIdx.y + threadIdx.x;
// is within the data range
if(globalId < dataSize){
// get word
unsigned int word = data_gpu[globalId];
if((BIT31 & word) > 0){
// if filler word - get count
int count = word & (BIT30 - 1);
counts_gpu[globalId] = count;
}
else{
counts_gpu[globalId] = 1;
}
}
}
template __global__ void gpu_getCounts<unsigned long long int>(unsigned int* data_gpu, unsigned long long int* counts_gpu, unsigned long long int dataSize);
template __global__ void gpu_getCounts<unsigned int>(unsigned int* data_gpu, unsigned int* counts_gpu, unsigned int dataSize);
/*
* Device function performing decompression
*
* Parameters:
* data_gpu - device pointer to the compressed data
* counts_gpu - device pointer to an array storing sizes of blocks
* result_gpu - device pointer to the output array
* dataSize - input data size in integers
*/
template<class T>
__global__ void gpu_decompressWords(unsigned int* data_gpu, T* counts_gpu, unsigned int* result_gpu, T dataSize){
// get global id
unsigned long long int globalId = blockIdx.x * (blockDim.x * blockDim.y) + blockDim.x * threadIdx.y + threadIdx.x;
// out of range
if(globalId >= dataSize) return;
// read word
unsigned int word = data_gpu[globalId];
// read offset for block
unsigned long long int offset = counts_gpu[globalId];
//printf("id : %d offset: %d \n", globalId, offset);
// is filler word
if((BIT31 & word) > 0){
int count = word & (BIT30 - 1);
unsigned int filler;
// assign correct filler word
if((BIT3130 & word) == BIT3130){
// is ones
filler = ONES31;
}
else{
// zeros
filler = 0;
}
// fill array
for(int i = 0; i < count; i++){
result_gpu[offset + i] = filler;
}
}
// is literal word
else{
result_gpu[offset] = word;
}
// printf("%d", offset);
}
template __global__ void gpu_decompressWords<unsigned long long int>(unsigned int* data_gpu, unsigned long long int* counts_gpu, unsigned int* result_gpu, unsigned long long int dataSize);
template __global__ void gpu_decompressWords<unsigned int>(unsigned int* data_gpu, unsigned int* counts_gpu, unsigned int* result_gpu, unsigned int dataSize);
/*
* Device function converting 32 31-bit words into 31 32-bit ones
*
* Parameters:
* result_gpu - device pointer to the decompressed data
* finalOutput_gpu - device pointer to the final output array
* dataSize - input data size in integers
*/
template<class T>
__global__ void gpu_mergeWords(unsigned int* result_gpu, unsigned int* finalOutput_gpu, T dataSize){
// get global id
int globalId = blockIdx.x * (blockDim.x * blockDim.y) + blockDim.x * threadIdx.y + threadIdx.x;
int id = threadIdx.x;
if(globalId >= dataSize) return;
unsigned int word = result_gpu[globalId];
word = (__shfl_down(word, 1) << (warpSize - id - 1)) | (word >> id);
// first 31 threads save a word each
if(id < warpSize - 1){
finalOutput_gpu[blockIdx.x * 31*32 + threadIdx.y * 31 + id] = word;
// if(blockIdx.x * 31*32 + threadIdx.y * 31 + id == 31){
// printf("thread id %d", globalId);
// }
}
}
template __global__ void gpu_mergeWords<unsigned long long int>(unsigned int* result_gpu, unsigned int* finalOutput_gpu, unsigned long long int dataSize);
template __global__ void gpu_mergeWords<unsigned int>(unsigned int* result_gpu, unsigned int* finalOutput_gpu, unsigned int dataSize);
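/*
* Illustrative end-to-end decompression sketch (an assumption added for documentation, not part
* of the original file): it chains gpu_getCounts, an exclusive scan (done here on the host for
* simplicity; a GPU scan would normally be used), gpu_decompressWords and gpu_mergeWords.
* The name demoDecompressPipeline is hypothetical; finalOutput_gpu must be able to hold
* (total / 32) * 31 uncompressed 32-bit words.
*/
static void demoDecompressPipeline(unsigned int* compressed_gpu, unsigned int compressedSize, unsigned int* finalOutput_gpu)
{
const unsigned int threadsPerBlock = 32 * 32;
unsigned int blocks = (compressedSize + threadsPerBlock - 1) / threadsPerBlock;
// 1. per compressed word: how many 31-bit words it expands to
unsigned int* counts_gpu;
cudaMalloc((void**)&counts_gpu, compressedSize * sizeof(unsigned int));
gpu_getCounts<unsigned int><<<blocks, dim3(32, 32)>>>(compressed_gpu, counts_gpu, compressedSize);
// 2. exclusive scan of the counts -> output offset of every compressed word
unsigned int* counts_cpu = new unsigned int[compressedSize];
cudaMemcpy(counts_cpu, counts_gpu, compressedSize * sizeof(unsigned int), cudaMemcpyDeviceToHost);
unsigned int total = 0;
for (unsigned int i = 0; i < compressedSize; i++)
{
unsigned int c = counts_cpu[i];
counts_cpu[i] = total;
total += c;
}
cudaMemcpy(counts_gpu, counts_cpu, compressedSize * sizeof(unsigned int), cudaMemcpyHostToDevice);
// 3. expand fill and literal words into 31-bit words
unsigned int* result_gpu;
cudaMalloc((void**)&result_gpu, total * sizeof(unsigned int));
gpu_decompressWords<unsigned int><<<blocks, dim3(32, 32)>>>(compressed_gpu, counts_gpu, result_gpu, compressedSize);
// 4. merge every 32 31-bit words back into 31 32-bit words
unsigned int mergeBlocks = (total + threadsPerBlock - 1) / threadsPerBlock;
gpu_mergeWords<unsigned int><<<mergeBlocks, dim3(32, 32)>>>(result_gpu, finalOutput_gpu, total);
cudaFree(result_gpu);
cudaFree(counts_gpu);
delete[] counts_cpu;
}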
}
namespace no_sorting {
template<class T>
__global__ void compressData(
unsigned int* data,
unsigned int* output,
T* blockCounts,
T* orderingArray,
T* sizeCounter_gpu,
T dataSize) {
// count of words for every warp
__shared__ int counts[32];
// length of the last word in a warp
__shared__ int endLengths[32];
// type of the last word in a warp
__shared__ int endings[32];
// type of the first word in a warp
__shared__ int beginnings[32];
// array indicating whether the last thread of a warp has been merged
__shared__ bool merged[32];
// real block offset using atomic add
__shared__ T outputOffset;
// get thread id
int id = threadIdx.x;
T id_global = blockIdx.x * (31*32) + threadIdx.y *31 + id;
unsigned int word = 0;
// retrieve word, only first 31 threads
if(id_global > dataSize) return;
if (id < WARP_SIZE - 1) {
word = data[id_global];
}
// divide words into 31bit parts
// gets 31 - id bits from one lane above
// and id bits from own lane
//word = (__shfl_down(word, 1) & (ONES31 >> id)) << id | (word & TOP31ONES) >> (32 - id);
word = ONES31 & ((__shfl_up(word, 1) >> (32 - id)) | (word << id));
// word info variables
int ones = 0;
int zeros = 0;
int literals = 0;
int type;
// mark word types for warp
// detect words with zeros and words with ones
// is a zero fill word
if (word == ZEROS) {
zeros |= 1 << id;
type = WORD_ZEROS;
markEndWordTypes(WORD_ZEROS, endings, id);
}
// is a one fill word
else if (word == ONES31) {
ones |= 1 << id;
type = WORD_ONES;
markEndWordTypes(WORD_ONES, endings, id);
}
else
{
type = WORD_LITERAL;
markEndWordTypes(WORD_LITERAL, endings, id);
}
// exchange word information within the warp
zeros = orWithinWarp(zeros);
ones = orWithinWarp(ones);
literals = ~(zeros | ones);
// send complete information to other threads
// if (id == WARP_LEADER) {
// zeros = __shfl(zeros, 0);
// ones = __shfl(ones, 0);
// literals = __shfl(literals, 0);
// }
int n = 0x3 << id;
int flags = BIT31;
bool idle = true;
// if is not last
if (id < 31) {
int res = 1 << id;
if (((n & zeros) == res || (n & ones) == res || (literals & (1 << id)) > 0)) {
// mark endings
flags |= 1 << id;
idle = false;
}
}
else{
idle = false;
}
// exchange endings
flags = orWithinWarp(flags);
int blockSize = 1;
// index within warp
int index = __popc(((1<<id) - 1) & flags);
// if first word in block, write beginning
if(index == 0){
beginnings[threadIdx.y] = type;
}
// calculate the number of words within a block
if (!idle) {
for (int i = id-1; i >= 0; i--) {
if ((flags & (1 << i)) > 0) {
break;
}
blockSize++;
}
if (word == ONES31) {
// word = BIT3130;
writeEndingSize(id, endLengths, blockSize);
}
else if (word == ZEROS) {
// word = BIT31;
writeEndingSize(id, endLengths, blockSize);
}
else{
writeEndingSize(id, endLengths, 0);
}
}
// last thread calculates the number of words and writes it to the shared array
if(id == WARP_LEADER){
counts[threadIdx.y] = __popc(flags);
}
// sync all threads within block
__syncthreads();
// the first warp scans the array and gets total block word size
// then calculates offset
int mergeShift = 0;
if(threadIdx.y == BLOCK_LEADER){
merged[id] = false;
int count = counts[id];
// used to not check the same condition twice
bool satisfiedMergingConditions = false;
// only execute if it's not going to get merged with something else
if((id == warpSize - 1) || (endings[id] != beginnings[id+1]) || endings[id] == WORD_LITERAL || counts[id] > 1){
int i = 1;
satisfiedMergingConditions = true;
int bonus = 0;
// calculate merge shifts
while(true){
// has 1 length and words match
if(i < id && counts[id - i] == 1 && beginnings[id] == endings[id-i] && beginnings[id] != WORD_LITERAL){
mergeShift++;
merged[id - i] = true;
bonus += endLengths[id - i];
i++;
}
else if(i <= id && beginnings[id] == endings[id - i] && beginnings[id] != WORD_LITERAL){
mergeShift++;
merged[id - i] = true;
bonus += endLengths[id - i];
i++;
break;
}
else break;
}
endLengths[id] = bonus;
}
if(!satisfiedMergingConditions){
endLengths[id] = 0;
}
mergeShift = localScan(mergeShift, id);
int blockOffset = localScan(count, id);
counts[id] = blockOffset - count - mergeShift;
}
__syncthreads();
IF_LAST{
idle = merged[threadIdx.y];
}
int bonus = 0;
// get global offset for warp and warp offset
if(!idle){
// first word in a warp gets a bonus
bonus = index == 0 ? endLengths[threadIdx.y] : 0;
index += counts[threadIdx.y];
if (word == ONES31) {
word = BIT3130 | (blockSize + bonus);
}
else if (word == ZEROS) {
word = BIT31 | (blockSize + bonus);
}
// if it's the last thread in block - either processing last word or the last thread of the last warp
if((id == (warpSize - 1) && threadIdx.y == (blockDim.y - 1))){
blockCounts[blockIdx.x] = index + 1;
outputOffset = atomicAdd(sizeCounter_gpu, (index + 1));
}
}
__syncthreads();
if(!idle){
// assign offset
orderingArray[blockIdx.x] = outputOffset;
output[index + outputOffset] = word;
}
}
template __global__ void compressData<unsigned long long int>(
unsigned int* data,
unsigned int* output,
unsigned long long int* blockCounts,
unsigned long long int* orderingArray,
unsigned long long int* sizeCounter_gpu,
unsigned long long int dataSize);
template __global__ void compressData<unsigned int>(
unsigned int* data,
unsigned int* output,
unsigned int* blockCounts,
unsigned int* orderingArray,
unsigned int* sizeCounter_gpu,
unsigned int dataSize);
template<class T>
__global__ void getCounts(unsigned int* data_gpu, T* counts_gpu, T dataSize){
// get global id
T globalId = blockIdx.x * (blockDim.x * blockDim.y) + blockDim.x * threadIdx.y + threadIdx.x;
// is within the data range
if(globalId < dataSize){
// get word
unsigned int word = data_gpu[globalId];
if((BIT31 & word) > 0){
// if filler word - get count
int count = word & (BIT30 - 1);
counts_gpu[globalId] = count;
}
else{
counts_gpu[globalId] = 1;
}
}
}
template __global__ void getCounts<unsigned int>(unsigned int* data_gpu, unsigned int* counts_gpu, unsigned int dataSize);
template __global__ void getCounts<unsigned long long int>(unsigned int* data_gpu, unsigned long long int* counts_gpu, unsigned long long int dataSize);
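// One possible host-side use of getCounts (a sketch, not shown in this file): launch it over
// all compressed words, then take an exclusive prefix scan of counts_gpu (e.g. with
// thrust::exclusive_scan) to obtain each word's starting position in the decompressed stream.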
template<class T>
__global__ void decompressWords(
unsigned int* data_gpu,
unsigned int* result_gpu,
T* offsets,
T* blockSizes,
T blocks,
T dataSize){
__shared__ int blockCounts[32];
__shared__ T blockStart;
__shared__ T blockEnd;
// get global id
unsigned int blockId = blockIdx.x;
int localId = blockDim.x * threadIdx.y + threadIdx.x;
if(localId == 0){
blockStart = offsets[blockId];
blockEnd = blockStart + blockSizes[blockId];
// printf("block : %d ", blockId);
}
__syncthreads();
if(threadIdx.y == 0){
blockCounts[threadIdx.x] = 0;
}
T globalId = blockStart + localId;
if(globalId < blockStart || globalId >= blockEnd) return;
unsigned int word = data_gpu[globalId];
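// each compressed block writes into its own 32*32 = 1024-word window of the output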
T offset = 32*32*blockId;
// fill words carry their run length in the low 30 bits; a literal expands to one word
int count = (BIT31 & word) > 0 ? (word & (BIT30 - 1)) : 1;
// exclusive prefix scan of counts within the warp gives this word's offset; only full (non-final) warps publish their totals below
int warpOffset = localScan(count, threadIdx.x) - count;
if(warpOffset >= 1024){
printf("warp offset overboard %d", warpOffset);
}
if(blockStart + threadIdx.y * 32 + 32 < blockEnd){
if(threadIdx.x == warpSize - 1){
blockCounts[threadIdx.y] = warpOffset + count;
if((warpOffset + count) >= 1024){
printf("danger: %d", warpOffset + count);
}
}
}
__syncthreads();
if(blockStart + threadIdx.y * 32 + 32 < blockEnd){
if(threadIdx.y == 0){
int val = blockCounts[threadIdx.x];
if(val >= 1024){
printf("warp offset: %d", val);
}
int o = localScan(val, threadIdx.x);
blockCounts[threadIdx.x] = o - val;
if(o-val > 1024*1024){
printf("val: %d, o: %d, warpOffset: %d, count: %d \n", val, o, warpOffset, count);
}
}
}
__syncthreads();
offset += blockCounts[threadIdx.y] + warpOffset;
if((BIT31 & word) > 0){
unsigned int filler;
// assign correct filler word
if((BIT3130 & word) == BIT3130){
// is ones
filler = ONES31;
}
else{
// zeros
filler = 0;
}
// fill array
for(int i = 0; i < count; i++){
result_gpu[offset + i] = filler;
}
}
else{
result_gpu[offset] = word;
}
// printf("%d", offset);
}
template __global__ void decompressWords<unsigned int>(
unsigned int* data_gpu,
unsigned int* result_gpu,
unsigned int* offsets,
unsigned int* blockSizes,
unsigned int blocks,
unsigned int dataSize);
template __global__ void decompressWords<unsigned long long int>(
unsigned int* data_gpu,
unsigned int* result_gpu,
unsigned long long int* offsets,
unsigned long long int* blockSizes,
unsigned long long int blocks,
unsigned long long int dataSize);
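// note: decompressWords (above) and mergeWords (below) assume a 32x32 thread block
// (32 warps of 32 lanes), matching the 1024-word output window used per compressed block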
template<class T>
__global__ void mergeWords(unsigned int* result_gpu, unsigned int* finalOutput_gpu, T dataSize){
// get global id
T globalId = blockIdx.x * (blockDim.x * blockDim.y) + blockDim.x * threadIdx.y + threadIdx.x;
int id = threadIdx.x;
if(globalId >= dataSize) return;
unsigned int word = result_gpu[globalId];
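// repack 32 words of 31 payload bits into 31 dense 32-bit words: lane id keeps the upper
// 31-id bits of its own payload and borrows the id+1 low bits from the next lane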
word = (__shfl_down(word, 1) << (warpSize - id - 1)) | (word >> id);
// first 31 threads save a word each
if(id < warpSize - 1){
finalOutput_gpu[blockIdx.x * 31*32 + threadIdx.y * 31 + id] = word;
// if(blockIdx.x * 31*32 + threadIdx.y * 31 + id == 31){
// printf("thread id %d", globalId);
// }
}
}
template __global__ void mergeWords<unsigned int>(unsigned int* result_gpu, unsigned int* finalOutput_gpu, unsigned int dataSize);
template __global__ void mergeWords<unsigned long long int>(unsigned int* result_gpu, unsigned int* finalOutput_gpu, unsigned long long int dataSize);
__global__ void reoderKernel(
unsigned long long int* blockSizes,
unsigned long long int* offsets,
unsigned long long int* outputOffsets,
unsigned long long int blockCount,
unsigned int* data,
unsigned long long int dataSize,
unsigned int* output)
{
unsigned long long int globalId = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
if(globalId >= dataSize) return;
int blockIndex = 0;
unsigned long long int blockOffset = 0;
// find current block
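// offsets[] is assumed to be nondecreasing, so the search can stop at the first block that starts past globalId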
for(int i=0;i<blockCount; i++){
// find max value smaller than globalId
if(offsets[i] <= globalId && offsets[i] >= blockOffset){
blockIndex = i;
blockOffset = offsets[i];
}
else{
break;
}
}
output[outputOffsets[blockIndex] + globalId - blockOffset]= data[globalId];
}
}
|
3fbf4b9f2cf6f64e2e2c9b13a09635b1b9fe7a6c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Mst solver tests
// Author: Alex Fender [email protected]
#include <utilities/base_fixture.hpp>
#include <utilities/test_utilities.hpp>
#include <cugraph/algorithms.hpp>
#include <cugraph/legacy/graph.hpp>
#include <cugraph/utilities/high_res_timer.hpp>
#include <raft/core/error.hpp>
#include <raft/core/handle.hpp>
#include <hip/hip_runtime_api.h>
#include <cmath>
#include "../src/converters/legacy/COOtoCSR.cuh"
#include <thrust/device_ptr.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/reduce.h>
typedef struct Mst_Usecase_t {
std::string matrix_file;
Mst_Usecase_t(const std::string& a)
{
// assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR
const std::string& rapidsDatasetRootDir = cugraph::test::get_rapids_dataset_root_dir();
if ((a != "") && (a[0] != '/')) {
matrix_file = rapidsDatasetRootDir + "/" + a;
} else {
matrix_file = a;
}
}
Mst_Usecase_t& operator=(const Mst_Usecase_t& rhs)
{
matrix_file = rhs.matrix_file;
return *this;
}
} Mst_Usecase;
class Tests_Mst : public ::testing::TestWithParam<Mst_Usecase> {
public:
Tests_Mst() {}
static void SetUpTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
template <typename T>
void run_current_test(const Mst_Usecase& param)
{
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
std::stringstream ss;
std::string test_id = std::string(test_info->test_case_name()) + std::string(".") +
std::string(test_info->name()) + std::string("_") +
cugraph::test::getFileName(param.matrix_file) + std::string("_") +
ss.str().c_str();
int m, k, nnz;
MM_typecode mc;
HighResTimer hr_timer{};
FILE* fpin = fopen(param.matrix_file.c_str(), "r");
ASSERT_NE(fpin, nullptr) << "fopen (" << param.matrix_file << ") failure.";
ASSERT_EQ(cugraph::test::mm_properties<int>(fpin, 1, &mc, &m, &k, &nnz), 0)
<< "could not read Matrix Market file properties"
<< "\n";
ASSERT_TRUE(mm_is_matrix(mc));
ASSERT_TRUE(mm_is_coordinate(mc));
ASSERT_FALSE(mm_is_complex(mc));
ASSERT_FALSE(mm_is_skew(mc));
// Allocate memory on host
std::vector<int> cooRowInd(nnz), cooColInd(nnz);
std::vector<T> cooVal(nnz), mst(m);
// Read
ASSERT_EQ((cugraph::test::mm_to_coo<int, T>(
fpin, 1, nnz, &cooRowInd[0], &cooColInd[0], &cooVal[0], NULL)),
0)
<< "could not read matrix data"
<< "\n";
ASSERT_EQ(fclose(fpin), 0);
raft::handle_t handle;
std::cout << std::endl;
cugraph::legacy::GraphCOOView<int, int, T> G_coo(
&cooRowInd[0], &cooColInd[0], &cooVal[0], m, nnz);
auto G_unique = cugraph::coo_to_csr(G_coo);
cugraph::legacy::GraphCSRView<int, int, T> G(G_unique->view().offsets,
G_unique->view().indices,
G_unique->view().edge_data,
G_unique->view().number_of_vertices,
G_unique->view().number_of_edges);
hipDeviceSynchronize();
hr_timer.start("MST");
hipProfilerStart();
auto mst_edges = cugraph::minimum_spanning_tree<int, int, T>(handle, G);
hipProfilerStop();
hipDeviceSynchronize();
hr_timer.stop();
hr_timer.display_and_clear(std::cout);
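// sanity bound: the sum of all edge weights (non-negative in this dataset) is an upper
// bound on the weight of any spanning tree; the MST result is checked against it below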
auto expected_mst_weight = thrust::reduce(
thrust::device_pointer_cast(G_unique->view().edge_data),
thrust::device_pointer_cast(G_unique->view().edge_data) + G_unique->view().number_of_edges);
auto calculated_mst_weight = thrust::reduce(
thrust::device_pointer_cast(mst_edges->view().edge_data),
thrust::device_pointer_cast(mst_edges->view().edge_data) + mst_edges->view().number_of_edges);
std::cout << "calculated_mst_weight: " << calculated_mst_weight << std::endl;
std::cout << "number_of_MST_edges: " << mst_edges->view().number_of_edges << std::endl;
EXPECT_LE(calculated_mst_weight, expected_mst_weight);
EXPECT_LE(mst_edges->view().number_of_edges, 2 * m - 2);
}
};
TEST_P(Tests_Mst, CheckFP32_T) { run_current_test<float>(GetParam()); }
TEST_P(Tests_Mst, CheckFP64_T) { run_current_test<double>(GetParam()); }
INSTANTIATE_TEST_SUITE_P(simple_test,
Tests_Mst,
::testing::Values(Mst_Usecase("test/datasets/netscience.mtx")));
CUGRAPH_TEST_PROGRAM_MAIN()
| 3fbf4b9f2cf6f64e2e2c9b13a09635b1b9fe7a6c.cu | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Mst solver tests
// Author: Alex Fender [email protected]
#include <utilities/base_fixture.hpp>
#include <utilities/test_utilities.hpp>
#include <cugraph/algorithms.hpp>
#include <cugraph/legacy/graph.hpp>
#include <cugraph/utilities/high_res_timer.hpp>
#include <raft/core/error.hpp>
#include <raft/core/handle.hpp>
#include <cuda_profiler_api.h>
#include <cmath>
#include "../src/converters/legacy/COOtoCSR.cuh"
#include <thrust/device_ptr.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/reduce.h>
typedef struct Mst_Usecase_t {
std::string matrix_file;
Mst_Usecase_t(const std::string& a)
{
// assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR
const std::string& rapidsDatasetRootDir = cugraph::test::get_rapids_dataset_root_dir();
if ((a != "") && (a[0] != '/')) {
matrix_file = rapidsDatasetRootDir + "/" + a;
} else {
matrix_file = a;
}
}
Mst_Usecase_t& operator=(const Mst_Usecase_t& rhs)
{
matrix_file = rhs.matrix_file;
return *this;
}
} Mst_Usecase;
class Tests_Mst : public ::testing::TestWithParam<Mst_Usecase> {
public:
Tests_Mst() {}
static void SetUpTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
template <typename T>
void run_current_test(const Mst_Usecase& param)
{
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
std::stringstream ss;
std::string test_id = std::string(test_info->test_case_name()) + std::string(".") +
std::string(test_info->name()) + std::string("_") +
cugraph::test::getFileName(param.matrix_file) + std::string("_") +
ss.str().c_str();
int m, k, nnz;
MM_typecode mc;
HighResTimer hr_timer{};
FILE* fpin = fopen(param.matrix_file.c_str(), "r");
ASSERT_NE(fpin, nullptr) << "fopen (" << param.matrix_file << ") failure.";
ASSERT_EQ(cugraph::test::mm_properties<int>(fpin, 1, &mc, &m, &k, &nnz), 0)
<< "could not read Matrix Market file properties"
<< "\n";
ASSERT_TRUE(mm_is_matrix(mc));
ASSERT_TRUE(mm_is_coordinate(mc));
ASSERT_FALSE(mm_is_complex(mc));
ASSERT_FALSE(mm_is_skew(mc));
// Allocate memory on host
std::vector<int> cooRowInd(nnz), cooColInd(nnz);
std::vector<T> cooVal(nnz), mst(m);
// Read
ASSERT_EQ((cugraph::test::mm_to_coo<int, T>(
fpin, 1, nnz, &cooRowInd[0], &cooColInd[0], &cooVal[0], NULL)),
0)
<< "could not read matrix data"
<< "\n";
ASSERT_EQ(fclose(fpin), 0);
raft::handle_t handle;
std::cout << std::endl;
cugraph::legacy::GraphCOOView<int, int, T> G_coo(
&cooRowInd[0], &cooColInd[0], &cooVal[0], m, nnz);
auto G_unique = cugraph::coo_to_csr(G_coo);
cugraph::legacy::GraphCSRView<int, int, T> G(G_unique->view().offsets,
G_unique->view().indices,
G_unique->view().edge_data,
G_unique->view().number_of_vertices,
G_unique->view().number_of_edges);
cudaDeviceSynchronize();
hr_timer.start("MST");
cudaProfilerStart();
auto mst_edges = cugraph::minimum_spanning_tree<int, int, T>(handle, G);
cudaProfilerStop();
cudaDeviceSynchronize();
hr_timer.stop();
hr_timer.display_and_clear(std::cout);
auto expected_mst_weight = thrust::reduce(
thrust::device_pointer_cast(G_unique->view().edge_data),
thrust::device_pointer_cast(G_unique->view().edge_data) + G_unique->view().number_of_edges);
auto calculated_mst_weight = thrust::reduce(
thrust::device_pointer_cast(mst_edges->view().edge_data),
thrust::device_pointer_cast(mst_edges->view().edge_data) + mst_edges->view().number_of_edges);
std::cout << "calculated_mst_weight: " << calculated_mst_weight << std::endl;
std::cout << "number_of_MST_edges: " << mst_edges->view().number_of_edges << std::endl;
EXPECT_LE(calculated_mst_weight, expected_mst_weight);
EXPECT_LE(mst_edges->view().number_of_edges, 2 * m - 2);
}
};
TEST_P(Tests_Mst, CheckFP32_T) { run_current_test<float>(GetParam()); }
TEST_P(Tests_Mst, CheckFP64_T) { run_current_test<double>(GetParam()); }
INSTANTIATE_TEST_SUITE_P(simple_test,
Tests_Mst,
::testing::Values(Mst_Usecase("test/datasets/netscience.mtx")));
CUGRAPH_TEST_PROGRAM_MAIN()
|
ca30eb22977f21309456233e1168276a5b6c4df0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2015 Kyuyeon Hwang ([email protected])
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "CudaKernels.h"
#include <stdio.h>
#define THREAD_PER_BLOCK 512
namespace fractal
{
namespace cudaKernels
{
template<class T>
inline __device__ T _exp(const T x);
template<class T>
inline __device__ T _log(const T x);
template<class T>
inline __device__ T _sqrt(const T x);
template<class T>
static __global__ void MemSetKernel(T *x, const T val, const unsigned long n);
template<class T>
static __global__ void ElemMultKernel(const T *x, const T *y, T *z, const unsigned long n);
template<class T>
static __global__ void AddKernel(const T *x, const T *y, T *z, const unsigned long n);
template<class T>
static __global__ void FuncSigmoidKernel(const T *x, T *y, T *y_fixed, const unsigned long n, FLOAT delta);
/* IBM check start */
/* template signal quantization for tanh */
template<class T>
static __global__ void FuncTanhKernel(const T *x, T *y, const unsigned long n,FLOAT delta);
/* template signal quantization for tanh */
/* IBM check end */
/* IBM check start */
/* template weight quantization */
template<class T>
static __global__ void WeightQuantKernel(const T *x, T *y, const unsigned long n,FLOAT delta, int M);
/* template weight quantization */
/* IBM check end */
template<class T>
static __global__ void FuncSoftplusKernel(const T *x, T *y, const unsigned long n);
/* IBM check start */
/* template signal quantization for rectlinear */
template<class T>
static __global__ void FuncRectLinearKernel(const T *x, T *y,T *y_fixed , const unsigned long n,FLOAT delta, int M,int relu_delta_final_decision);
/* template signal quantization for rectlinear */
/* IBM check end */
template<class T>
static __global__ void FuncSoftmaxKernel(const T *x, T *y, const unsigned long n);
template<class T>
static __global__ void FuncBoundRangeKernel(const T *x, T *y, const T _min, const T _max, const unsigned long n);
template<class T>
static __global__ void FuncSigmoidDerivKernel(const T *x, T *y, const unsigned long n);
template<class T>
static __global__ void FuncTanhDerivKernel(const T *x, T *y, const unsigned long n);
template<class T>
static __global__ void FuncSoftplusDerivKernel(const T *x, T *y, const unsigned long n);
template<class T>
static __global__ void FuncRectLinearDerivKernel(const T *x, T *y, const unsigned long n);
template<class T>
static __global__ void GenerateDropoutMaskKernel(T *mask, const T *uniformDist, const unsigned long n, const T dropoutRate);
template<class T>
static __global__ void RmspropKernel(T *newDerivs, const T *derivs, T *msDeriv, const T decayRate, const unsigned long n);
template<class T>
static __global__ void AdadeltaKernel(T *deltas, const T *derivs, T *msDeriv, T *msDelta, const T learningRate, const T decayRate, const unsigned long n);
template<>
inline __device__ float _exp<float>(const float x)
{
return min(__expf(x), 1e30);
}
template<>
inline __device__ double _exp<double>(const double x)
{
return min(exp(x), 1e300);
}
template<>
inline __device__ float _log<float>(const float x)
{
return __logf(x);
}
template<>
inline __device__ double _log<double>(const double x)
{
return log(x);
}
template<>
inline __device__ float _sqrt<float>(const float x)
{
return __fsqrt_rn(x);
}
template<>
inline __device__ double _sqrt<double>(const double x)
{
return __dsqrt_rn(x);
}
template<class T>
static __global__ void MemSetKernel(T *x, const T val, const unsigned long n)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
x[idx] = val;
}
template<class T>
static __global__ void ElemMultKernel(const T *x, const T *y, T *z, const unsigned long n)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
z[idx] = x[idx] * y[idx];
}
template<class T>
static __global__ void AddKernel(const T *x, const T *y, T *z, const unsigned long n)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
z[idx] = x[idx] + y[idx];
}
/* IBM check start */
/* Signal quantization kernel for Sigmoid */
/* If the QUANT_RELU flag is set, the quantization model is applied */
template<class T>
static __global__ void FuncSigmoidKernel(const T *x, T *y,T *y_fixed, const unsigned long n, FLOAT delta)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
y_fixed[idx] = (T)1 / ((T)1 + _exp<T>(-x[idx]));
y[idx] = (T)1 / ((T)1 + _exp<T>(-x[idx]));
#if QUANT_RELU // condition for signal quantization
if((delta <101.0 && delta > 99.0) == 0)
{
y[idx] = floor((fabs(y_fixed[idx])/delta)+(T)0.5);
y[idx] = y[idx]*delta;
}
#endif
}
/* Signal quantization kernel for Sigmoid */
/* IBM check end */
/* IBM check start */
/* Signal quantization kernel for Tanh */
/* If the QUANT_RELU flag is set, the quantization model is applied */
template<class T>
static __global__ void FuncTanhKernel(const T *x, T *y, const unsigned long n, FLOAT delta)
{
unsigned long idx;
T v;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
v = _exp<T>((T)(-2) * x[idx]);
y[idx] = (T)2 / ((T)1 + v) - (T)1;
#if QUANT_RELU
if((delta <101.0 && delta > 99.0) == 0)
{
T sign_;
sign_ = signbit(y[idx]);
if(sign_ != 0)
y[idx] = -1 * min(floor((fabs(y[idx])/delta)+(T)0.5),(1/delta));
else
y[idx] = min(floor((fabs(y[idx])/delta)+(T)0.5),(1/delta));
y[idx] = y[idx]*delta;
}
#endif
}
/* Signal quantization kernel for Tanh */
/* IBM check end */
/* IBM check start */
/* Weight quantization kernel */
template<class T>
static __global__ void WeightQuantKernel(const T *x, T *y, const unsigned long n, FLOAT delta,int M)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
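// symmetric uniform quantizer: y = sign(x) * min(round(|x|/delta), (M-1)/2) * delta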
int sign_;
sign_ = signbit(x[idx]);
if(sign_ != 0)
y[idx] = (T)-1 * min((T)floor((fabs(x[idx])/delta)+(T)0.5),(T)(M-1)/2);
else
y[idx] = min((T)floor((fabs(x[idx])/delta)+(T)0.5),(T)((M-1)/2));
y[idx] = y[idx]*delta;
}
/* Weight quantization kernel */
/* IBM check end */
template<class T>
static __global__ void FuncSoftplusKernel(const T *x, T *y, const unsigned long n)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
y[idx] = _log<T>((T)1 + _exp<T>(x[idx]));
}
/* IBM check start */
/* Signal quantization kernel for Rectlinear */
/* If the QUANT_RELU flag is set, the quantization model is applied */
template<class T>
static __global__ void FuncRectLinearKernel(const T *x, T *y, T *y_fixed, const unsigned long n,FLOAT delta, int M,int relu_delta_final_decision)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
//y[idx] = max((T)0, x[idx]);
/* Leaky */
y[idx] = max((T)0.01 * x[idx], x[idx]);
y_fixed[idx] = max((T)0.01 * x[idx], x[idx]);
#if QUANT_RELU
if(relu_delta_final_decision == 1)
{
//if(threadIdx.x == 1 )printf("pre : %f\n",y[idx]);
y[idx] = min((T)floor((y[idx]/delta)+(T)0.5),(T)(M-1));
y[idx] = y[idx]*delta;
//if(threadIdx.x == 1) printf("after : %f\n",y[idx]);
}
#endif
}
/* Signal quantization kernel for Rectlinear */
/* IBM check end */
template<class T>
static __global__ void FuncSoftmaxKernel(const T *x, T *y, const unsigned long n)
{
__shared__ T _v[THREAD_PER_BLOCK];
T v_tmp, v_max;
unsigned long i;
x += blockIdx.x * n;
y += blockIdx.x * n;
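// numerically stable softmax over one row: find the row maximum, subtract it before
// exponentiating, then normalize by the sum of the exponentials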
/* Sequential reduction(max) */
v_tmp = threadIdx.x < n ? x[threadIdx.x] : (T) 0;
#pragma unroll
for(i = threadIdx.x + blockDim.x; i < n; i += blockDim.x)
{
v_tmp = max(v_tmp, x[i]);
}
_v[threadIdx.x] = v_tmp;
__syncthreads();
/* Parallel reduction(max) */
i = (blockDim.x >> 1);
if(threadIdx.x < i)
v_tmp = _v[threadIdx.x];
for(; i > 0; i >>= 1)
{
if(threadIdx.x < i && threadIdx.x + i < n)
{
v_tmp = max(v_tmp, _v[threadIdx.x + i]);
_v[threadIdx.x] = v_tmp;
}
__syncthreads();
}
v_max = _v[0];
__syncthreads();
/* Sequential reduction(+) */
v_tmp = (T) 0;
#pragma unroll
for(i = threadIdx.x; i < n; i += blockDim.x)
{
v_tmp += _exp<T>(x[i] - v_max);
}
_v[threadIdx.x] = v_tmp;
__syncthreads();
/* Parallel reduction(+) */
i = (blockDim.x >> 1);
if(threadIdx.x < i)
v_tmp = _v[threadIdx.x];
for(; i > 0; i >>= 1)
{
if(threadIdx.x < i)
{
v_tmp += _v[threadIdx.x + i];
_v[threadIdx.x] = v_tmp;
}
__syncthreads();
}
/* Update */
v_tmp = _v[0];
#pragma unroll
for(i = threadIdx.x; i < n; i += blockDim.x)
{
y[i] = _exp<T>(x[i] - v_max) / v_tmp;
}
}
template<class T>
static __global__ void FuncBoundRangeKernel(const T *x, T *y, const T _min, const T _max, const unsigned long n)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
y[idx] = min(_max, max(_min, x[idx]));
}
template<class T>
static __global__ void FuncSigmoidDerivKernel(const T *x, T *y, const unsigned long n)
{
unsigned long idx;
T v;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
v = x[idx];
y[idx] = v * ((T)1 - v);
}
template<class T>
static __global__ void FuncTanhDerivKernel(const T *x, T *y, const unsigned long n)
{
unsigned long idx;
T v;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
v = x[idx];
y[idx] = ((T)1 - v) * ((T)1 + v);
}
template<class T>
static __global__ void FuncSoftplusDerivKernel(const T *x, T *y, const unsigned long n)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
y[idx] = (T)1 - _exp<T>(-x[idx]);
}
template<class T>
static __global__ void FuncRectLinearDerivKernel(const T *x, T *y, const unsigned long n)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
//y[idx] = (T)(x[idx] > (T)0);
/* Leaky */
y[idx] = (T)0.01 + (T)0.99 * (T)(x[idx] > (T)0);
}
template<class T>
static __global__ void GenerateDropoutMaskKernel(T *mask, const T *uniformDist, const unsigned long n, const T dropoutRate)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
mask[idx] = (T)(uniformDist[idx] >= dropoutRate) / ((T)1 - dropoutRate);
}
template<class T>
static __global__ void RmspropKernel(T *newDerivs, const T *derivs, T *msDeriv, const T decayRate, const unsigned long n)
{
unsigned long idx;
T ms, rms, deriv;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
ms = msDeriv[idx];
deriv = derivs[idx];
T bound = _sqrt<T>((T)1 / ((T)1 - decayRate));
ms = decayRate * ms + ((T)1 - decayRate) * deriv * deriv;
rms = _sqrt<T>(ms) + (T)1e-20;
newDerivs[idx] = min(bound, max(-bound, deriv / rms));
msDeriv[idx] = ms;
}
template<class T>
static __global__ void AdadeltaKernel(T *deltas, const T *derivs, T *msDeriv, T *msDelta, const T learningRate, const T decayRate, const unsigned long n)
{
unsigned long idx;
T _msDelta, rmsDelta;
T _msDeriv, rmsDeriv;
T deriv, delta;
const T bound = (T)10;
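// Adadelta update: the RMS of recent deltas (with learningRate^2 acting as the epsilon
// term) scales the RMS-normalized, clipped gradient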
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
_msDeriv = msDeriv[idx];
_msDelta = msDelta[idx];
deriv = derivs[idx];
_msDeriv = decayRate * _msDeriv + ((T)1 - decayRate) * deriv * deriv;
rmsDeriv = _sqrt<T>(_msDeriv) + (T)1e-20;
rmsDelta = _sqrt<T>(_msDelta + learningRate * learningRate);
delta = rmsDelta * min(bound, max(-bound, deriv / rmsDeriv));
_msDelta = decayRate * _msDelta + ((T)1 - decayRate) * delta * delta;
deltas[idx] = delta;
msDeriv[idx] = _msDeriv;
msDelta[idx] = _msDelta;
}
template<class T>
void MemSet(T *_x, const T val, const unsigned long n, const hipStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
hipLaunchKernelGGL(( MemSetKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream, _x, val, n);
}
template<class T>
void ElemMult(const T *_x, const T *_y, T *_z, const unsigned long n, const hipStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
hipLaunchKernelGGL(( ElemMultKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream, _x, _y, _z, n);
}
template<class T>
void Add(const T *_x, const T *_y, T *_z, const unsigned long n, const hipStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
hipLaunchKernelGGL(( AddKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream, _x, _y, _z, n);
}
/* IBM check start */
/* Signal quantization kernel call for Sigmoid */
template<class T>
void FuncSigmoid(const T *_x, T *_y, T *_y_fixed, const unsigned long n, const hipStream_t stream,FLOAT delta)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
hipLaunchKernelGGL(( FuncSigmoidKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream, _x, _y, _y_fixed, n, delta);
}
/* Signal quantization kernel for Sigmoid */
/* IBM check end */
/* IBM check start */
/* Signal quantization kernel call for Tanh */
template<class T>
void FuncTanh(const T *_x, T *_y, const unsigned long n, const hipStream_t stream,FLOAT delta)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
hipLaunchKernelGGL(( FuncTanhKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream, _x, _y, n, delta);
}
/* Signal quantization kernel for Tanh */
/* IBM check end */
/* IBM check start */
/* Weight quantization kernel call */
template<class T>
void WeightQuant(const T *_x, T *_y, const unsigned long n,const hipStream_t stream, FLOAT delta, int M)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
hipLaunchKernelGGL(( WeightQuantKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream, _x, _y, n, delta, M);
}
/* Weight quantization kernel call */
/* IBM check end */
template<class T>
void FuncSoftplus(const T *_x, T *_y, const unsigned long n, const hipStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
hipLaunchKernelGGL(( FuncSoftplusKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream, _x, _y, n);
}
/* IBM check start */
/* Signal quantization kernel call for Rectlinear*/
template<class T>
void FuncRectLinear(const T *_x, T *_y, T *_y_fixed,const unsigned long n, const hipStream_t stream, FLOAT delta, int M,int relu_delta_final_decision)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
hipLaunchKernelGGL(( FuncRectLinearKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream, _x, _y, _y_fixed,n,delta,M,relu_delta_final_decision);
}
/* Signal quantization kernel call for rectlinear*/
/* IBM check end */
template<class T>
void FuncSoftmax(const T *_x, T *_y, const unsigned long layerSize, const unsigned long batchSize, const hipStream_t stream)
{
dim3 dimGrid(batchSize);
dim3 dimBlock(THREAD_PER_BLOCK);
hipLaunchKernelGGL(( FuncSoftmaxKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream, _x, _y, layerSize);
}
template<class T>
void FuncBoundRange(const T *_x, T *_y, const T min, const T max, const unsigned long n, const hipStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
hipLaunchKernelGGL(( FuncBoundRangeKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream, _x, _y, min, max, n);
}
template<class T>
void FuncSigmoidDeriv(const T *_x, T *_y, const unsigned long n, const hipStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
hipLaunchKernelGGL(( FuncSigmoidDerivKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream, _x, _y, n);
}
template<class T>
void FuncTanhDeriv(const T *_x, T *_y, const unsigned long n, const hipStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
hipLaunchKernelGGL(( FuncTanhDerivKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream, _x, _y, n);
}
template<class T>
void FuncSoftplusDeriv(const T *_x, T *_y, const unsigned long n, const hipStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
hipLaunchKernelGGL(( FuncSoftplusDerivKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream, _x, _y, n);
}
template<class T>
void FuncRectLinearDeriv(const T *_x, T *_y, const unsigned long n, const hipStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
hipLaunchKernelGGL(( FuncRectLinearDerivKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream, _x, _y, n);
}
template<class T>
void GenerateDropoutMask(T *_mask, const T *_uniformDist, const unsigned long n, const T dropoutRate, const hipStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
hipLaunchKernelGGL(( GenerateDropoutMaskKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream, _mask, _uniformDist, n, dropoutRate);
}
template<class T>
void Rmsprop(T *_newDerivs, const T *_derivs, T *_msDeriv, const T decayRate, const unsigned long n, const hipStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
hipLaunchKernelGGL(( RmspropKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream, _newDerivs, _derivs, _msDeriv, decayRate, n);
}
template<class T>
void Adadelta(T *_deltas, const T *_derivs, T *_msDeriv, T *_msDelta, const T learningRate, const T decayRate, const unsigned long n, const hipStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
hipLaunchKernelGGL(( AdadeltaKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream, _deltas, _derivs, _msDeriv, _msDelta, learningRate, decayRate, n);
}
template void MemSet<float>(float *_x, const float val, const unsigned long n, const hipStream_t stream);
template void MemSet<double>(double *_x, const double val, const unsigned long n, const hipStream_t stream);
template void ElemMult<float>(const float *_x, const float *_y, float *_z, const unsigned long n, const hipStream_t stream);
template void ElemMult<double>(const double *_x, const double *_y, double *_z, const unsigned long n, const hipStream_t stream);
template void Add<float>(const float *_x, const float *_y, float *_z, const unsigned long n, const hipStream_t stream);
template void Add<double>(const double *_x, const double *_y, double *_z, const unsigned long n, const hipStream_t stream);
/* IBM check start */
template void FuncSigmoid<float>(const float *_x, float *_y, float *_y_fixed,const unsigned long n, const hipStream_t stream, FLOAT delta);
template void FuncSigmoid<double>(const double *_x, double *_y, double *_y_fixed, const unsigned long n, const hipStream_t stream, FLOAT delta);
template void FuncTanh<float>(const float *_x, float *_y, const unsigned long n, const hipStream_t stream,FLOAT delta);
template void FuncTanh<double>(const double *_x, double *_y, const unsigned long n, const hipStream_t stream,FLOAT delta);
template void WeightQuant<float>(const float *_x, float *_y, const unsigned long n,const hipStream_t stream, FLOAT delta, int M);
template void WeightQuant<double>(const double *_x, double *_y, const unsigned long n,const hipStream_t stream, FLOAT delta, int M);
template void FuncRectLinear<float>(const float *_x, float *_y, float *_y_fixed,const unsigned long n, const hipStream_t stream, FLOAT delta,int M,int relu_delta_final_decision);
template void FuncRectLinear<double>(const double *_x, double *_y, double *_y_fixed, const unsigned long n, const hipStream_t stream, FLOAT delta, int M,int relu_delta_final_decision);
/* IBM check end */
template void FuncSoftplus<float>(const float *_x, float *_y, const unsigned long n, const hipStream_t stream);
template void FuncSoftplus<double>(const double *_x, double *_y, const unsigned long n, const hipStream_t stream);
template void FuncSoftmax<float>(const float *_x, float *_y, const unsigned long layerSize, const unsigned long batchSize, const hipStream_t stream);
template void FuncSoftmax<double>(const double *_x, double *_y, const unsigned long layerSize, const unsigned long batchSize, const hipStream_t stream);
template void FuncBoundRange<float>(const float *_x, float *_y, const float min, const float max, const unsigned long n, const hipStream_t stream);
template void FuncBoundRange<double>(const double *_x, double *_y, const double min, const double max, const unsigned long n, const hipStream_t stream);
template void FuncSigmoidDeriv<float>(const float *_x, float *_y, const unsigned long n, const hipStream_t stream);
template void FuncSigmoidDeriv<double>(const double *_x, double *_y, const unsigned long n, const hipStream_t stream);
template void FuncTanhDeriv<float>(const float *_x, float *_y, const unsigned long n, const hipStream_t stream);
template void FuncTanhDeriv<double>(const double *_x, double *_y, const unsigned long n, const hipStream_t stream);
template void FuncSoftplusDeriv<float>(const float *_x, float *_y, const unsigned long n, const hipStream_t stream);
template void FuncSoftplusDeriv<double>(const double *_x, double *_y, const unsigned long n, const hipStream_t stream);
template void FuncRectLinearDeriv<float>(const float *_x, float *_y, const unsigned long n, const hipStream_t stream);
template void FuncRectLinearDeriv<double>(const double *_x, double *_y, const unsigned long n, const hipStream_t stream);
template void GenerateDropoutMask<float>(float *_mask, const float *_uniformDist, const unsigned long n, const float dropoutRate, const hipStream_t stream);
template void GenerateDropoutMask<double>(double *_mask, const double *_uniformDist, const unsigned long n, const double dropoutRate, const hipStream_t stream);
template void Rmsprop<float>(float *_newDerivs, const float *_derivs, float *_msDeriv, const float decayRate, const unsigned long n, const hipStream_t stream);
template void Rmsprop<double>(double *_newDerivs, const double *_derivs, double *_msDeriv, const double decayRate, const unsigned long n, const hipStream_t stream);
template void Adadelta<float>(float *_deltas, const float *_derivs, float *_msDeriv, float *_msDelta, const float learningRate, const float decayRate, const unsigned long n, const hipStream_t stream);
template void Adadelta<double>(double *_deltas, const double *_derivs, double *_msDeriv, double *_msDelta, const double learningRate, const double decayRate, const unsigned long n, const hipStream_t stream);
}
}
| ca30eb22977f21309456233e1168276a5b6c4df0.cu | /*
Copyright 2015 Kyuyeon Hwang ([email protected])
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "CudaKernels.h"
#include <stdio.h>
#define THREAD_PER_BLOCK 512
namespace fractal
{
namespace cudaKernels
{
template<class T>
inline __device__ T _exp(const T x);
template<class T>
inline __device__ T _log(const T x);
template<class T>
inline __device__ T _sqrt(const T x);
template<class T>
static __global__ void MemSetKernel(T *x, const T val, const unsigned long n);
template<class T>
static __global__ void ElemMultKernel(const T *x, const T *y, T *z, const unsigned long n);
template<class T>
static __global__ void AddKernel(const T *x, const T *y, T *z, const unsigned long n);
template<class T>
static __global__ void FuncSigmoidKernel(const T *x, T *y, T *y_fixed, const unsigned long n, FLOAT delta);
/* IBM check start */
/* template signal quantization for tanh */
template<class T>
static __global__ void FuncTanhKernel(const T *x, T *y, const unsigned long n,FLOAT delta);
/* template signal quantization for tanh */
/* IBM check end */
/* IBM check start */
/* template weight quantization */
template<class T>
static __global__ void WeightQuantKernel(const T *x, T *y, const unsigned long n,FLOAT delta, int M);
/* template weight quantization */
/* IBM check end */
template<class T>
static __global__ void FuncSoftplusKernel(const T *x, T *y, const unsigned long n);
/* IBM check start */
/* template signal quantization for rectlinear */
template<class T>
static __global__ void FuncRectLinearKernel(const T *x, T *y,T *y_fixed , const unsigned long n,FLOAT delta, int M,int relu_delta_final_decision);
/* template signal quantization for rectlinear */
/* IBM check end */
template<class T>
static __global__ void FuncSoftmaxKernel(const T *x, T *y, const unsigned long n);
template<class T>
static __global__ void FuncBoundRangeKernel(const T *x, T *y, const T _min, const T _max, const unsigned long n);
template<class T>
static __global__ void FuncSigmoidDerivKernel(const T *x, T *y, const unsigned long n);
template<class T>
static __global__ void FuncTanhDerivKernel(const T *x, T *y, const unsigned long n);
template<class T>
static __global__ void FuncSoftplusDerivKernel(const T *x, T *y, const unsigned long n);
template<class T>
static __global__ void FuncRectLinearDerivKernel(const T *x, T *y, const unsigned long n);
template<class T>
static __global__ void GenerateDropoutMaskKernel(T *mask, const T *uniformDist, const unsigned long n, const T dropoutRate);
template<class T>
static __global__ void RmspropKernel(T *newDerivs, const T *derivs, T *msDeriv, const T decayRate, const unsigned long n);
template<class T>
static __global__ void AdadeltaKernel(T *deltas, const T *derivs, T *msDeriv, T *msDelta, const T learningRate, const T decayRate, const unsigned long n);
template<>
inline __device__ float _exp<float>(const float x)
{
return min(__expf(x), 1e30);
}
template<>
inline __device__ double _exp<double>(const double x)
{
return min(exp(x), 1e300);
}
template<>
inline __device__ float _log<float>(const float x)
{
return __logf(x);
}
template<>
inline __device__ double _log<double>(const double x)
{
return log(x);
}
template<>
inline __device__ float _sqrt<float>(const float x)
{
return __fsqrt_rn(x);
}
template<>
inline __device__ double _sqrt<double>(const double x)
{
return __dsqrt_rn(x);
}
template<class T>
static __global__ void MemSetKernel(T *x, const T val, const unsigned long n)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
x[idx] = val;
}
template<class T>
static __global__ void ElemMultKernel(const T *x, const T *y, T *z, const unsigned long n)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
z[idx] = x[idx] * y[idx];
}
template<class T>
static __global__ void AddKernel(const T *x, const T *y, T *z, const unsigned long n)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
z[idx] = x[idx] + y[idx];
}
/* IBM check start */
/* Signal quantization kernel for Sigmoid */
/* If the QUANT_RELU flag is set, the quantization model is applied */
template<class T>
static __global__ void FuncSigmoidKernel(const T *x, T *y,T *y_fixed, const unsigned long n, FLOAT delta)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
y_fixed[idx] = (T)1 / ((T)1 + _exp<T>(-x[idx]));
y[idx] = (T)1 / ((T)1 + _exp<T>(-x[idx]));
#if QUANT_RELU // condition for signal quantization
if((delta <101.0 && delta > 99.0) == 0)
{
y[idx] = floor((fabs(y_fixed[idx])/delta)+(T)0.5);
y[idx] = y[idx]*delta;
}
#endif
}
/* Signal quantization kernel for Sigmoid */
/* IBM check end */
/* IBM check start */
/* Signal quantization kernel for Tanh */
/* If the QUANT_RELU flag is set, the quantization model is applied */
template<class T>
static __global__ void FuncTanhKernel(const T *x, T *y, const unsigned long n, FLOAT delta)
{
unsigned long idx;
T v;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
v = _exp<T>((T)(-2) * x[idx]);
y[idx] = (T)2 / ((T)1 + v) - (T)1;
#if QUANT_RELU
if((delta <101.0 && delta > 99.0) == 0)
{
T sign_;
sign_ = signbit(y[idx]);
if(sign_ != 0)
y[idx] = -1 * min(floor((fabs(y[idx])/delta)+(T)0.5),(1/delta));
else
y[idx] = min(floor((fabs(y[idx])/delta)+(T)0.5),(1/delta));
y[idx] = y[idx]*delta;
}
#endif
}
/* Signal quantization kernel for Tanh */
/* IBM check end */
/* IBM check start */
/* Weight quantization kernel */
template<class T>
static __global__ void WeightQuantKernel(const T *x, T *y, const unsigned long n, FLOAT delta,int M)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
int sign_;
sign_ = signbit(x[idx]);
if(sign_ != 0)
y[idx] = (T)-1 * min((T)floor((fabs(x[idx])/delta)+(T)0.5),(T)(M-1)/2);
else
y[idx] = min((T)floor((fabs(x[idx])/delta)+(T)0.5),(T)((M-1)/2));
y[idx] = y[idx]*delta;
}
/* Weight quantization kernel */
/* IBM check end */
template<class T>
static __global__ void FuncSoftplusKernel(const T *x, T *y, const unsigned long n)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
y[idx] = _log<T>((T)1 + _exp<T>(x[idx]));
}
/* IBM check start */
/* Signal quantization kernel for Rectlinear */
/* If the QUANT_RELU flag is set, the quantization model is applied */
template<class T>
static __global__ void FuncRectLinearKernel(const T *x, T *y, T *y_fixed, const unsigned long n,FLOAT delta, int M,int relu_delta_final_decision)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
//y[idx] = max((T)0, x[idx]);
/* Leaky */
y[idx] = max((T)0.01 * x[idx], x[idx]);
y_fixed[idx] = max((T)0.01 * x[idx], x[idx]);
#if QUANT_RELU
if(relu_delta_final_decision == 1)
{
//if(threadIdx.x == 1 )printf("pre : %f\n",y[idx]);
y[idx] = min((T)floor((y[idx]/delta)+(T)0.5),(T)(M-1));
y[idx] = y[idx]*delta;
//if(threadIdx.x == 1) printf("after : %f\n",y[idx]);
}
#endif
}
/* Signal quantization kernel for Rectlinear */
/* IBM check end */
template<class T>
static __global__ void FuncSoftmaxKernel(const T *x, T *y, const unsigned long n)
{
__shared__ T _v[THREAD_PER_BLOCK];
T v_tmp, v_max;
unsigned long i;
x += blockIdx.x * n;
y += blockIdx.x * n;
/* Sequential reduction(max) */
v_tmp = threadIdx.x < n ? x[threadIdx.x] : (T) 0;
#pragma unroll
for(i = threadIdx.x + blockDim.x; i < n; i += blockDim.x)
{
v_tmp = max(v_tmp, x[i]);
}
_v[threadIdx.x] = v_tmp;
__syncthreads();
/* Parallel reduction(max) */
i = (blockDim.x >> 1);
if(threadIdx.x < i)
v_tmp = _v[threadIdx.x];
for(; i > 0; i >>= 1)
{
if(threadIdx.x < i && threadIdx.x + i < n)
{
v_tmp = max(v_tmp, _v[threadIdx.x + i]);
_v[threadIdx.x] = v_tmp;
}
__syncthreads();
}
v_max = _v[0];
__syncthreads();
/* Sequential reduction(+) */
v_tmp = (T) 0;
#pragma unroll
for(i = threadIdx.x; i < n; i += blockDim.x)
{
v_tmp += _exp<T>(x[i] - v_max);
}
_v[threadIdx.x] = v_tmp;
__syncthreads();
/* Parallel reduction(+) */
i = (blockDim.x >> 1);
if(threadIdx.x < i)
v_tmp = _v[threadIdx.x];
for(; i > 0; i >>= 1)
{
if(threadIdx.x < i)
{
v_tmp += _v[threadIdx.x + i];
_v[threadIdx.x] = v_tmp;
}
__syncthreads();
}
/* Update */
v_tmp = _v[0];
#pragma unroll
for(i = threadIdx.x; i < n; i += blockDim.x)
{
y[i] = _exp<T>(x[i] - v_max) / v_tmp;
}
}
template<class T>
static __global__ void FuncBoundRangeKernel(const T *x, T *y, const T _min, const T _max, const unsigned long n)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
y[idx] = min(_max, max(_min, x[idx]));
}
template<class T>
static __global__ void FuncSigmoidDerivKernel(const T *x, T *y, const unsigned long n)
{
unsigned long idx;
T v;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
v = x[idx];
y[idx] = v * ((T)1 - v);
}
template<class T>
static __global__ void FuncTanhDerivKernel(const T *x, T *y, const unsigned long n)
{
unsigned long idx;
T v;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
v = x[idx];
y[idx] = ((T)1 - v) * ((T)1 + v);
}
template<class T>
static __global__ void FuncSoftplusDerivKernel(const T *x, T *y, const unsigned long n)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
y[idx] = (T)1 - _exp<T>(-x[idx]);
}
template<class T>
static __global__ void FuncRectLinearDerivKernel(const T *x, T *y, const unsigned long n)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
//y[idx] = (T)(x[idx] > (T)0);
/* Leaky */
y[idx] = (T)0.01 + (T)0.99 * (T)(x[idx] > (T)0);
}
template<class T>
static __global__ void GenerateDropoutMaskKernel(T *mask, const T *uniformDist, const unsigned long n, const T dropoutRate)
{
unsigned long idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
mask[idx] = (T)(uniformDist[idx] >= dropoutRate) / ((T)1 - dropoutRate);
}
template<class T>
static __global__ void RmspropKernel(T *newDerivs, const T *derivs, T *msDeriv, const T decayRate, const unsigned long n)
{
unsigned long idx;
T ms, rms, deriv;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
ms = msDeriv[idx];
deriv = derivs[idx];
T bound = _sqrt<T>((T)1 / ((T)1 - decayRate));
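// after the update below, ms >= (1 - decayRate) * deriv * deriv, so |deriv| / rms cannot
// exceed this bound; the clamp only guards numerical corner cases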
ms = decayRate * ms + ((T)1 - decayRate) * deriv * deriv;
rms = _sqrt<T>(ms) + (T)1e-20;
newDerivs[idx] = min(bound, max(-bound, deriv / rms));
msDeriv[idx] = ms;
}
template<class T>
static __global__ void AdadeltaKernel(T *deltas, const T *derivs, T *msDeriv, T *msDelta, const T learningRate, const T decayRate, const unsigned long n)
{
unsigned long idx;
T _msDelta, rmsDelta;
T _msDeriv, rmsDeriv;
T deriv, delta;
const T bound = (T)10;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= n) return;
_msDeriv = msDeriv[idx];
_msDelta = msDelta[idx];
deriv = derivs[idx];
_msDeriv = decayRate * _msDeriv + ((T)1 - decayRate) * deriv * deriv;
rmsDeriv = _sqrt<T>(_msDeriv) + (T)1e-20;
rmsDelta = _sqrt<T>(_msDelta + learningRate * learningRate);
delta = rmsDelta * min(bound, max(-bound, deriv / rmsDeriv));
_msDelta = decayRate * _msDelta + ((T)1 - decayRate) * delta * delta;
deltas[idx] = delta;
msDeriv[idx] = _msDeriv;
msDelta[idx] = _msDelta;
}
template<class T>
void MemSet(T *_x, const T val, const unsigned long n, const cudaStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
MemSetKernel<T><<<dimGrid, dimBlock, 0, stream>>>(_x, val, n);
}
template<class T>
void ElemMult(const T *_x, const T *_y, T *_z, const unsigned long n, const cudaStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
ElemMultKernel<T><<<dimGrid, dimBlock, 0, stream>>>(_x, _y, _z, n);
}
template<class T>
void Add(const T *_x, const T *_y, T *_z, const unsigned long n, const cudaStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
AddKernel<T><<<dimGrid, dimBlock, 0, stream>>>(_x, _y, _z, n);
}
/* IBM check start */
/* Signal quantization kernel call for Sigmoid */
template<class T>
void FuncSigmoid(const T *_x, T *_y, T *_y_fixed, const unsigned long n, const cudaStream_t stream,FLOAT delta)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
FuncSigmoidKernel<T><<<dimGrid, dimBlock, 0, stream>>>(_x, _y, _y_fixed, n, delta);
}
/* Signal quantization kernel for Sigmoid */
/* IBM check end */
/* IBM check start */
/* Signal quantization kernel call for Tanh */
template<class T>
void FuncTanh(const T *_x, T *_y, const unsigned long n, const cudaStream_t stream,FLOAT delta)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
FuncTanhKernel<T><<<dimGrid, dimBlock, 0, stream>>>(_x, _y, n, delta);
}
/* Signal quantization kernel for Tanh */
/* IBM check end */
/* IBM check start */
/* Weight quantization kernel call */
template<class T>
void WeightQuant(const T *_x, T *_y, const unsigned long n,const cudaStream_t stream, FLOAT delta, int M)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
WeightQuantKernel<T><<<dimGrid, dimBlock, 0, stream>>>(_x, _y, n, delta, M);
}
/* Weight quantization kernel call */
/* IBM check end */
template<class T>
void FuncSoftplus(const T *_x, T *_y, const unsigned long n, const cudaStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
FuncSoftplusKernel<T><<<dimGrid, dimBlock, 0, stream>>>(_x, _y, n);
}
/* IBM check start */
/* Signal quantization kernel call for Rectlinear*/
template<class T>
void FuncRectLinear(const T *_x, T *_y, T *_y_fixed,const unsigned long n, const cudaStream_t stream, FLOAT delta, int M,int relu_delta_final_decision)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
FuncRectLinearKernel<T><<<dimGrid, dimBlock, 0, stream>>>(_x, _y, _y_fixed,n,delta,M,relu_delta_final_decision);
}
/* Signal quantization kernel call for rectlinear*/
/* IBM check end */
template<class T>
void FuncSoftmax(const T *_x, T *_y, const unsigned long layerSize, const unsigned long batchSize, const cudaStream_t stream)
{
dim3 dimGrid(batchSize);
dim3 dimBlock(THREAD_PER_BLOCK);
FuncSoftmaxKernel<T><<<dimGrid, dimBlock, 0, stream>>>(_x, _y, layerSize);
}
template<class T>
void FuncBoundRange(const T *_x, T *_y, const T min, const T max, const unsigned long n, const cudaStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
FuncBoundRangeKernel<T><<<dimGrid, dimBlock, 0, stream>>>(_x, _y, min, max, n);
}
template<class T>
void FuncSigmoidDeriv(const T *_x, T *_y, const unsigned long n, const cudaStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
FuncSigmoidDerivKernel<T><<<dimGrid, dimBlock, 0, stream>>>(_x, _y, n);
}
template<class T>
void FuncTanhDeriv(const T *_x, T *_y, const unsigned long n, const cudaStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
FuncTanhDerivKernel<T><<<dimGrid, dimBlock, 0, stream>>>(_x, _y, n);
}
template<class T>
void FuncSoftplusDeriv(const T *_x, T *_y, const unsigned long n, const cudaStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
FuncSoftplusDerivKernel<T><<<dimGrid, dimBlock, 0, stream>>>(_x, _y, n);
}
template<class T>
void FuncRectLinearDeriv(const T *_x, T *_y, const unsigned long n, const cudaStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
FuncRectLinearDerivKernel<T><<<dimGrid, dimBlock, 0, stream>>>(_x, _y, n);
}
template<class T>
void GenerateDropoutMask(T *_mask, const T *_uniformDist, const unsigned long n, const T dropoutRate, const cudaStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
GenerateDropoutMaskKernel<T><<<dimGrid, dimBlock, 0, stream>>>(_mask, _uniformDist, n, dropoutRate);
}
template<class T>
void Rmsprop(T *_newDerivs, const T *_derivs, T *_msDeriv, const T decayRate, const unsigned long n, const cudaStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
RmspropKernel<T><<<dimGrid, dimBlock, 0, stream>>>(_newDerivs, _derivs, _msDeriv, decayRate, n);
}
template<class T>
void Adadelta(T *_deltas, const T *_derivs, T *_msDeriv, T *_msDelta, const T learningRate, const T decayRate, const unsigned long n, const cudaStream_t stream)
{
dim3 dimGrid((n + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK);
dim3 dimBlock(THREAD_PER_BLOCK);
AdadeltaKernel<T><<<dimGrid, dimBlock, 0, stream>>>(_deltas, _derivs, _msDeriv, _msDelta, learningRate, decayRate, n);
}
template void MemSet<float>(float *_x, const float val, const unsigned long n, const cudaStream_t stream);
template void MemSet<double>(double *_x, const double val, const unsigned long n, const cudaStream_t stream);
template void ElemMult<float>(const float *_x, const float *_y, float *_z, const unsigned long n, const cudaStream_t stream);
template void ElemMult<double>(const double *_x, const double *_y, double *_z, const unsigned long n, const cudaStream_t stream);
template void Add<float>(const float *_x, const float *_y, float *_z, const unsigned long n, const cudaStream_t stream);
template void Add<double>(const double *_x, const double *_y, double *_z, const unsigned long n, const cudaStream_t stream);
/* IBM check start */
template void FuncSigmoid<float>(const float *_x, float *_y, float *_y_fixed,const unsigned long n, const cudaStream_t stream, FLOAT delta);
template void FuncSigmoid<double>(const double *_x, double *_y, double *_y_fixed, const unsigned long n, const cudaStream_t stream, FLOAT delta);
template void FuncTanh<float>(const float *_x, float *_y, const unsigned long n, const cudaStream_t stream,FLOAT delta);
template void FuncTanh<double>(const double *_x, double *_y, const unsigned long n, const cudaStream_t stream,FLOAT delta);
template void WeightQuant<float>(const float *_x, float *_y, const unsigned long n,const cudaStream_t stream, FLOAT delta, int M);
template void WeightQuant<double>(const double *_x, double *_y, const unsigned long n,const cudaStream_t stream, FLOAT delta, int M);
template void FuncRectLinear<float>(const float *_x, float *_y, float *_y_fixed,const unsigned long n, const cudaStream_t stream, FLOAT delta,int M,int relu_delta_final_decision);
template void FuncRectLinear<double>(const double *_x, double *_y, double *_y_fixed, const unsigned long n, const cudaStream_t stream, FLOAT delta, int M,int relu_delta_final_decision);
/* IBM check end */
template void FuncSoftplus<float>(const float *_x, float *_y, const unsigned long n, const cudaStream_t stream);
template void FuncSoftplus<double>(const double *_x, double *_y, const unsigned long n, const cudaStream_t stream);
template void FuncSoftmax<float>(const float *_x, float *_y, const unsigned long layerSize, const unsigned long batchSize, const cudaStream_t stream);
template void FuncSoftmax<double>(const double *_x, double *_y, const unsigned long layerSize, const unsigned long batchSize, const cudaStream_t stream);
template void FuncBoundRange<float>(const float *_x, float *_y, const float min, const float max, const unsigned long n, const cudaStream_t stream);
template void FuncBoundRange<double>(const double *_x, double *_y, const double min, const double max, const unsigned long n, const cudaStream_t stream);
template void FuncSigmoidDeriv<float>(const float *_x, float *_y, const unsigned long n, const cudaStream_t stream);
template void FuncSigmoidDeriv<double>(const double *_x, double *_y, const unsigned long n, const cudaStream_t stream);
template void FuncTanhDeriv<float>(const float *_x, float *_y, const unsigned long n, const cudaStream_t stream);
template void FuncTanhDeriv<double>(const double *_x, double *_y, const unsigned long n, const cudaStream_t stream);
template void FuncSoftplusDeriv<float>(const float *_x, float *_y, const unsigned long n, const cudaStream_t stream);
template void FuncSoftplusDeriv<double>(const double *_x, double *_y, const unsigned long n, const cudaStream_t stream);
template void FuncRectLinearDeriv<float>(const float *_x, float *_y, const unsigned long n, const cudaStream_t stream);
template void FuncRectLinearDeriv<double>(const double *_x, double *_y, const unsigned long n, const cudaStream_t stream);
template void GenerateDropoutMask<float>(float *_mask, const float *_uniformDist, const unsigned long n, const float dropoutRate, const cudaStream_t stream);
template void GenerateDropoutMask<double>(double *_mask, const double *_uniformDist, const unsigned long n, const double dropoutRate, const cudaStream_t stream);
template void Rmsprop<float>(float *_newDerivs, const float *_derivs, float *_msDeriv, const float decayRate, const unsigned long n, const cudaStream_t stream);
template void Rmsprop<double>(double *_newDerivs, const double *_derivs, double *_msDeriv, const double decayRate, const unsigned long n, const cudaStream_t stream);
template void Adadelta<float>(float *_deltas, const float *_derivs, float *_msDeriv, float *_msDelta, const float learningRate, const float decayRate, const unsigned long n, const cudaStream_t stream);
template void Adadelta<double>(double *_deltas, const double *_derivs, double *_msDeriv, double *_msDelta, const double learningRate, const double decayRate, const unsigned long n, const cudaStream_t stream);
}
}
|
0ae3c4a8d184fe56ac8f13e0ab1e646ba2cffbf6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2011-2016 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "local_contrast_subtractive_2d_layer_updater_cuda.h"
#include "../local_contrast_subtractive_layer.h"
#include "../neural_network_exception.h"
#include "util_cuda.h"
#include <memory>
namespace nnforge
{
namespace cuda
{
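// Horizontal pass of the separable local-contrast blur. window_width holds the
// half-window size (weights[0] is the centre tap); taps that would fall outside
// the row are mirrored back inside it, so no padding is required.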
__global__ void local_contrast_subtractive_2d_blur_horizontal_upd_kernel(
const float * __restrict input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int window_width,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)(((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll 4
for(int i = 1; i < window_width; ++i)
{
current_weights++;
if (i < x + 1)
current_input_low--;
if (i > x + 1)
current_input_low++;
if (i < width - x)
current_input_high++;
if (i > width - x)
current_input_high--;
res += (*current_input_low + *current_input_high) * *current_weights;
}
output[(z * height + y) * width + x] = res;
}
}
template<int WINDOW_WIDTH>
__global__ void local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel(
const float * __restrict input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)(((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH; ++i)
{
current_weights++;
if (i < x + 1)
current_input_low--;
if (i > x + 1)
current_input_low++;
if (i < width - x)
current_input_high++;
if (i > width - x)
current_input_high--;
res += (*current_input_low + *current_input_high) * *current_weights;
}
output[(z * height + y) * width + x] = res;
}
}
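// Vertical pass: blurs the horizontally smoothed buffer along y (with the same
// mirrored boundary handling) and writes original_input - blurred to the output.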
__global__ void local_contrast_subtractive_2d_blur_vertical_and_subtract_upd_kernel(
const float * __restrict input,
const float * __restrict original_input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int window_height,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)((z * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll 4
for(int i = 1; i < window_height; ++i)
{
current_weights++;
if (i < y + 1)
current_input_low -= width;
if (i > y + 1)
current_input_low += width;
if (i < height - y)
current_input_high += width;
if (i > height - y)
current_input_high -= width;
res += (*current_input_low + *current_input_high) * *current_weights;
}
int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x;
output[offset] = original_input[offset] - res;
}
}
template<int WINDOW_HEIGHT>
__global__ void local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel(
const float * __restrict input,
const float * __restrict original_input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)((z * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll
for(int i = 1; i < WINDOW_HEIGHT; ++i)
{
current_weights++;
if (i < y + 1)
current_input_low -= width;
if (i > y + 1)
current_input_low += width;
if (i < height - y)
current_input_high += width;
if (i > height - y)
current_input_high -= width;
res += (*current_input_low + *current_input_high) * *current_weights;
}
int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x;
output[offset] = original_input[offset] - res;
}
}
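// Feature maps that are not in the affected list are copied through to the output unchanged.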
__global__ void local_contrast_subtractive_2d_copy_unaffected_upd_kernel(
const float * __restrict original_input,
float * __restrict output,
const unsigned int * __restrict unaffected_feature_map_list,
int input_feature_map_count,
int unaffected_feature_map_count,
int elem_count_per_feature_map,
int entry_count)
{
int elem_id = blockIdx.x * blockDim.x + threadIdx.x;
int unaffected_feature_map_index = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (elem_id < elem_count_per_feature_map) && (unaffected_feature_map_index < unaffected_feature_map_count) && (entry_id < entry_count);
if (in_bounds)
{
int unaffected_feature_map_id = unaffected_feature_map_list[unaffected_feature_map_index];
int offset = (entry_id * input_feature_map_count + unaffected_feature_map_id) * elem_count_per_feature_map + elem_id;
output[offset] = original_input[offset];
}
}
void local_contrast_subtractive_2d_layer_updater_cuda::enqueue_forward_propagation(
hipStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::ptr temporary_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_per_entry_buffer,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_1st_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
affected_feature_map_count * entry_count);
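// Dispatch to a fully unrolled specialisation for half-window sizes 1..10, falling back to the generic kernel otherwise.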
switch(half_window_sizes[0])
{
case 1:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<1>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 2:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<2>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 3:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<3>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 4:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<4>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 5:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<5>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 6:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<6>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 7:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<7>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 8:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<8>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 9:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<9>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 10:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<10>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
default:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_upd_kernel), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id,
*input_buffers[0],
*temporary_working_per_entry_buffer,
*schema_data[0],
*schema_data[1],
output_configuration_specific.feature_map_count,
affected_feature_map_count,
half_window_sizes[0],
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
entry_count);
break;
}
std::pair<dim3, dim3> kernel_2nd_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
affected_feature_map_count * entry_count);
switch(half_window_sizes[1])
{
case 1:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<1>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 2:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<2>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 3:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<3>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 4:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<4>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 5:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<5>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 6:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<6>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 7:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<7>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 8:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<8>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 9:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<9>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 10:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<10>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
default:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_upd_kernel), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id,
*temporary_working_per_entry_buffer,
*input_buffers[0],
*output_buffer,
*schema_data[0],
*schema_data[2],
output_configuration_specific.feature_map_count,
affected_feature_map_count,
half_window_sizes[1],
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
entry_count);
break;
}
if ((unaffected_feature_map_count > 0) && ((const float *)*input_buffers[0] != (const float *)*output_buffer))
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
output_elem_count_per_feature_map,
unaffected_feature_map_count,
entry_count);
hipLaunchKernelGGL(( local_contrast_subtractive_2d_copy_unaffected_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*input_buffers[0],
*output_buffer,
*schema_data[3],
output_configuration_specific.feature_map_count,
unaffected_feature_map_count,
output_elem_count_per_feature_map,
entry_count);
}
}
void local_contrast_subtractive_2d_layer_updater_cuda::updater_configured()
{
std::shared_ptr<const local_contrast_subtractive_layer> layer_derived = std::dynamic_pointer_cast<const local_contrast_subtractive_layer>(layer_schema);
affected_feature_map_count = static_cast<int>(layer_derived->feature_maps_affected.size());
unaffected_feature_map_count = static_cast<int>(layer_derived->feature_maps_unaffected.size());
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
half_window_sizes.push_back(static_cast<int>((*it + 1) >> 1));
central_mult = 1.0F - (2.0F * layer_derived->window_weights_list[0][0] * layer_derived->window_weights_list[1][0]);
}
size_t local_contrast_subtractive_2d_layer_updater_cuda::get_temporary_working_per_entry_buffer_size(const layer_action& action) const
{
return output_elem_count_per_feature_map * affected_feature_map_count * sizeof(float);
}
int local_contrast_subtractive_2d_layer_updater_cuda::get_input_index_layer_can_write(const layer_action& action) const
{
return 0;
}
}
}
| 0ae3c4a8d184fe56ac8f13e0ab1e646ba2cffbf6.cu | /*
* Copyright 2011-2016 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "local_contrast_subtractive_2d_layer_updater_cuda.h"
#include "../local_contrast_subtractive_layer.h"
#include "../neural_network_exception.h"
#include "util_cuda.h"
#include <memory>
namespace nnforge
{
namespace cuda
{
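// Horizontal pass of the separable local-contrast blur. window_width holds the
// half-window size (weights[0] is the centre tap); taps that would fall outside
// the row are mirrored back inside it, so no padding is required.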
__global__ void local_contrast_subtractive_2d_blur_horizontal_upd_kernel(
const float * __restrict input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int window_width,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)(((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll 4
for(int i = 1; i < window_width; ++i)
{
current_weights++;
if (i < x + 1)
current_input_low--;
if (i > x + 1)
current_input_low++;
if (i < width - x)
current_input_high++;
if (i > width - x)
current_input_high--;
res += (*current_input_low + *current_input_high) * *current_weights;
}
output[(z * height + y) * width + x] = res;
}
}
template<int WINDOW_WIDTH>
__global__ void local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel(
const float * __restrict input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)(((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH; ++i)
{
current_weights++;
if (i < x + 1)
current_input_low--;
if (i > x + 1)
current_input_low++;
if (i < width - x)
current_input_high++;
if (i > width - x)
current_input_high--;
res += (*current_input_low + *current_input_high) * *current_weights;
}
output[(z * height + y) * width + x] = res;
}
}
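// Vertical pass: blurs the horizontally smoothed buffer along y (with the same
// mirrored boundary handling) and writes original_input - blurred to the output.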
__global__ void local_contrast_subtractive_2d_blur_vertical_and_subtract_upd_kernel(
const float * __restrict input,
const float * __restrict original_input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int window_height,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)((z * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll 4
for(int i = 1; i < window_height; ++i)
{
current_weights++;
if (i < y + 1)
current_input_low -= width;
if (i > y + 1)
current_input_low += width;
if (i < height - y)
current_input_high += width;
if (i > height - y)
current_input_high -= width;
res += (*current_input_low + *current_input_high) * *current_weights;
}
int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x;
output[offset] = original_input[offset] - res;
}
}
template<int WINDOW_HEIGHT>
__global__ void local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel(
const float * __restrict input,
const float * __restrict original_input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)((z * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll
for(int i = 1; i < WINDOW_HEIGHT; ++i)
{
current_weights++;
if (i < y + 1)
current_input_low -= width;
if (i > y + 1)
current_input_low += width;
if (i < height - y)
current_input_high += width;
if (i > height - y)
current_input_high -= width;
res += (*current_input_low + *current_input_high) * *current_weights;
}
int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x;
output[offset] = original_input[offset] - res;
}
}
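// Feature maps that are not in the affected list are copied through to the output unchanged.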
__global__ void local_contrast_subtractive_2d_copy_unaffected_upd_kernel(
const float * __restrict original_input,
float * __restrict output,
const unsigned int * __restrict unaffected_feature_map_list,
int input_feature_map_count,
int unaffected_feature_map_count,
int elem_count_per_feature_map,
int entry_count)
{
int elem_id = blockIdx.x * blockDim.x + threadIdx.x;
int unaffected_feature_map_index = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (elem_id < elem_count_per_feature_map) && (unaffected_feature_map_index < unaffected_feature_map_count) && (entry_id < entry_count);
if (in_bounds)
{
int unaffected_feature_map_id = unaffected_feature_map_list[unaffected_feature_map_index];
int offset = (entry_id * input_feature_map_count + unaffected_feature_map_id) * elem_count_per_feature_map + elem_id;
output[offset] = original_input[offset];
}
}
void local_contrast_subtractive_2d_layer_updater_cuda::enqueue_forward_propagation(
cudaStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::ptr temporary_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_per_entry_buffer,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_1st_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
affected_feature_map_count * entry_count);
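// Dispatch to a fully unrolled specialisation for half-window sizes 1..10, falling back to the generic kernel otherwise.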
switch(half_window_sizes[0])
{
case 1:
local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<1><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 2:
local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<2><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 3:
local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<3><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 4:
local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<4><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 5:
local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<5><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 6:
local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<6><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 7:
local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<7><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 8:
local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<8><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 9:
local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<9><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 10:
local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<10><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
default:
local_contrast_subtractive_2d_blur_horizontal_upd_kernel<<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(
*input_buffers[0],
*temporary_working_per_entry_buffer,
*schema_data[0],
*schema_data[1],
output_configuration_specific.feature_map_count,
affected_feature_map_count,
half_window_sizes[0],
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
entry_count);
break;
}
std::pair<dim3, dim3> kernel_2nd_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
affected_feature_map_count * entry_count);
switch(half_window_sizes[1])
{
case 1:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<1><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 2:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<2><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 3:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<3><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 4:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<4><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 5:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<5><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 6:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<6><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 7:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<7><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 8:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<8><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 9:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<9><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 10:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<10><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
default:
local_contrast_subtractive_2d_blur_vertical_and_subtract_upd_kernel<<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(
*temporary_working_per_entry_buffer,
*input_buffers[0],
*output_buffer,
*schema_data[0],
*schema_data[2],
output_configuration_specific.feature_map_count,
affected_feature_map_count,
half_window_sizes[1],
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
entry_count);
break;
}
if ((unaffected_feature_map_count > 0) && ((const float *)*input_buffers[0] != (const float *)*output_buffer))
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
output_elem_count_per_feature_map,
unaffected_feature_map_count,
entry_count);
local_contrast_subtractive_2d_copy_unaffected_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*input_buffers[0],
*output_buffer,
*schema_data[3],
output_configuration_specific.feature_map_count,
unaffected_feature_map_count,
output_elem_count_per_feature_map,
entry_count);
}
}
void local_contrast_subtractive_2d_layer_updater_cuda::updater_configured()
{
std::shared_ptr<const local_contrast_subtractive_layer> layer_derived = std::dynamic_pointer_cast<const local_contrast_subtractive_layer>(layer_schema);
affected_feature_map_count = static_cast<int>(layer_derived->feature_maps_affected.size());
unaffected_feature_map_count = static_cast<int>(layer_derived->feature_maps_unaffected.size());
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
half_window_sizes.push_back(static_cast<int>((*it + 1) >> 1));
central_mult = 1.0F - (2.0F * layer_derived->window_weights_list[0][0] * layer_derived->window_weights_list[1][0]);
}
size_t local_contrast_subtractive_2d_layer_updater_cuda::get_temporary_working_per_entry_buffer_size(const layer_action& action) const
{
return output_elem_count_per_feature_map * affected_feature_map_count * sizeof(float);
}
int local_contrast_subtractive_2d_layer_updater_cuda::get_input_index_layer_can_write(const layer_action& action) const
{
return 0;
}
}
}
|
b4703651df7ed939bdfe4db073c63c2742913f67.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
# =============================================================================
# Copyright (C) 2016-2021 Blue Brain Project
#
# See top-level LICENSE file for details.
# =============================================================================.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "coreneuron/utils/randoms/nrnran123.h"
namespace coreneuron {
/* global data structure per process */
__device__ static const double SHIFT32 = 1.0 / 4294967297.0; /* 1/(2^32 + 1) */
__device__ static philox4x32_key_t k = {{0}};
__device__ static unsigned int instance_count_ = 0;
__device__ size_t nrnran123_instance_count() {
return instance_count_;
}
__device__ size_t nrnran123_state_size() {
return sizeof(nrnran123_State);
}
__global__ void nrnran123_set_globalindex(uint32_t gix) {
k.v[0] = gix;
}
/* if one sets the global, one should reset all the stream sequences. */
__device__ uint32_t nrnran123_get_globalindex() {
return k.v[0];
}
__global__ void nrnran123_setup_cuda_newstream(nrnran123_State* s,
uint32_t id1,
uint32_t id2,
uint32_t id3) {
s->c.v[0] = 0;
s->c.v[1] = id3;
s->c.v[2] = id1;
s->c.v[3] = id2;
nrnran123_setseq(s, 0, 0);
atomicAdd(&instance_count_, 1);
}
__global__ void nrnran123_cuda_deletestream(nrnran123_State* s) {
atomicSub(&instance_count_, 1);
}
__device__ void nrnran123_getseq(nrnran123_State* s, uint32_t* seq, unsigned char* which) {
*seq = s->c.v[0];
*which = s->which_;
}
__device__ void nrnran123_setseq(nrnran123_State* s, uint32_t seq, unsigned char which) {
if (which > 3) {
s->which_ = 0;
} else {
s->which_ = which;
}
s->c.v[0] = seq;
s->r = philox4x32(s->c, k);
}
__device__ void nrnran123_getids(nrnran123_State* s, uint32_t* id1, uint32_t* id2) {
*id1 = s->c.v[2];
*id2 = s->c.v[3];
}
__device__ void nrnran123_getids3(nrnran123_State* s, uint32_t* id1, uint32_t* id2, uint32_t* id3) {
*id3 = s->c.v[1];
*id1 = s->c.v[2];
*id2 = s->c.v[3];
}
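/* Return the next 32-bit draw from the cached Philox output block; once all
four lanes have been consumed, advance the counter and generate a new block. */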
__device__ uint32_t nrnran123_ipick(nrnran123_State* s) {
uint32_t rval;
unsigned char which = s->which_;
rval = s->r.v[which++];
if (which > 3) {
which = 0;
s->c.v[0]++;
s->r = philox4x32(s->c, k);
}
s->which_ = which;
return rval;
}
__device__ double nrnran123_dblpick(nrnran123_State* s) {
return nrnran123_uint2dbl(nrnran123_ipick(s));
}
__device__ double nrnran123_negexp(nrnran123_State* s) {
/* min 2.3283064e-10 to max 22.18071 */
return -log(nrnran123_dblpick(s));
}
/* at cost of a cached value we could compute two at a time. */
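/* Marsaglia polar method: rejection-sample a point in the unit disc and transform it into a standard normal deviate. */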
__device__ double nrnran123_normal(nrnran123_State* s) {
double w, x, y;
double u1, u2;
do {
u1 = nrnran123_dblpick(s);
u2 = nrnran123_dblpick(s);
u1 = 2. * u1 - 1.;
u2 = 2. * u2 - 1.;
w = (u1 * u1) + (u2 * u2);
} while (w > 1);
y = sqrt((-2. * log(w)) / w);
x = u1 * y;
return x;
}
__device__ double nrnran123_uint2dbl(uint32_t u) {
/* 0 to 2^32-1 transforms to double value in open (0,1) interval */
/* min 2.3283064e-10 to max (1 - 2.3283064e-10) */
return ((double)u + 1.0) * SHIFT32;
}
/* nrn123 streams are created from cpu launcher routine */
nrnran123_State* nrnran123_newstream(uint32_t id1, uint32_t id2) {
return nrnran123_newstream3(id1, id2, 0);
}
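/* Allocate the stream state in device memory and initialise it with a single-thread kernel launch. */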
nrnran123_State* nrnran123_newstream3(uint32_t id1, uint32_t id2, uint32_t id3) {
nrnran123_State* s;
hipMalloc((void**)&s, sizeof(nrnran123_State));
hipMemset((void*)s, 0, sizeof(nrnran123_State));
hipLaunchKernelGGL(( nrnran123_setup_cuda_newstream), dim3(1), dim3(1), 0, 0, s, id1, id2, id3);
hipDeviceSynchronize();
return s;
}
/* nrn123 streams are destroyed from cpu launcher routine */
void nrnran123_deletestream(nrnran123_State* s) {
hipLaunchKernelGGL(( nrnran123_cuda_deletestream), dim3(1), dim3(1), 0, 0, s);
hipDeviceSynchronize();
hipFree(s);
}
/* set global index for random123 stream on gpu */
void nrnran123_set_gpu_globalindex(uint32_t gix) {
hipLaunchKernelGGL(( nrnran123_set_globalindex), dim3(1),dim3(1), 0, 0, gix);
hipDeviceSynchronize();
}
} //namespace coreneuron
| b4703651df7ed939bdfe4db073c63c2742913f67.cu | /*
# =============================================================================
# Copyright (C) 2016-2021 Blue Brain Project
#
# See top-level LICENSE file for details.
# =============================================================================.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "coreneuron/utils/randoms/nrnran123.h"
namespace coreneuron {
/* global data structure per process */
__device__ static const double SHIFT32 = 1.0 / 4294967297.0; /* 1/(2^32 + 1) */
__device__ static philox4x32_key_t k = {{0}};
__device__ static unsigned int instance_count_ = 0;
__device__ size_t nrnran123_instance_count() {
return instance_count_;
}
__device__ size_t nrnran123_state_size() {
return sizeof(nrnran123_State);
}
__global__ void nrnran123_set_globalindex(uint32_t gix) {
k.v[0] = gix;
}
/* if one sets the global, one should reset all the stream sequences. */
__device__ uint32_t nrnran123_get_globalindex() {
return k.v[0];
}
__global__ void nrnran123_setup_cuda_newstream(nrnran123_State* s,
uint32_t id1,
uint32_t id2,
uint32_t id3) {
s->c.v[0] = 0;
s->c.v[1] = id3;
s->c.v[2] = id1;
s->c.v[3] = id2;
nrnran123_setseq(s, 0, 0);
atomicAdd(&instance_count_, 1);
}
__global__ void nrnran123_cuda_deletestream(nrnran123_State* s) {
atomicSub(&instance_count_, 1);
}
__device__ void nrnran123_getseq(nrnran123_State* s, uint32_t* seq, unsigned char* which) {
*seq = s->c.v[0];
*which = s->which_;
}
__device__ void nrnran123_setseq(nrnran123_State* s, uint32_t seq, unsigned char which) {
if (which > 3) {
s->which_ = 0;
} else {
s->which_ = which;
}
s->c.v[0] = seq;
s->r = philox4x32(s->c, k);
}
__device__ void nrnran123_getids(nrnran123_State* s, uint32_t* id1, uint32_t* id2) {
*id1 = s->c.v[2];
*id2 = s->c.v[3];
}
__device__ void nrnran123_getids3(nrnran123_State* s, uint32_t* id1, uint32_t* id2, uint32_t* id3) {
*id3 = s->c.v[1];
*id1 = s->c.v[2];
*id2 = s->c.v[3];
}
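/* Return the next 32-bit draw from the cached Philox output block; once all
four lanes have been consumed, advance the counter and generate a new block. */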
__device__ uint32_t nrnran123_ipick(nrnran123_State* s) {
uint32_t rval;
unsigned char which = s->which_;
rval = s->r.v[which++];
if (which > 3) {
which = 0;
s->c.v[0]++;
s->r = philox4x32(s->c, k);
}
s->which_ = which;
return rval;
}
__device__ double nrnran123_dblpick(nrnran123_State* s) {
return nrnran123_uint2dbl(nrnran123_ipick(s));
}
__device__ double nrnran123_negexp(nrnran123_State* s) {
/* min 2.3283064e-10 to max 22.18071 */
return -log(nrnran123_dblpick(s));
}
/* at cost of a cached value we could compute two at a time. */
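/* Marsaglia polar method: rejection-sample a point in the unit disc and transform it into a standard normal deviate. */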
__device__ double nrnran123_normal(nrnran123_State* s) {
double w, x, y;
double u1, u2;
do {
u1 = nrnran123_dblpick(s);
u2 = nrnran123_dblpick(s);
u1 = 2. * u1 - 1.;
u2 = 2. * u2 - 1.;
w = (u1 * u1) + (u2 * u2);
} while (w > 1);
y = sqrt((-2. * log(w)) / w);
x = u1 * y;
return x;
}
__device__ double nrnran123_uint2dbl(uint32_t u) {
/* 0 to 2^32-1 transforms to double value in open (0,1) interval */
/* min 2.3283064e-10 to max (1 - 2.3283064e-10) */
return ((double)u + 1.0) * SHIFT32;
}
/* nrn123 streams are created from cpu launcher routine */
nrnran123_State* nrnran123_newstream(uint32_t id1, uint32_t id2) {
return nrnran123_newstream3(id1, id2, 0);
}
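/* Allocate the stream state in device memory and initialise it with a single-thread kernel launch. */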
nrnran123_State* nrnran123_newstream3(uint32_t id1, uint32_t id2, uint32_t id3) {
nrnran123_State* s;
cudaMalloc((void**)&s, sizeof(nrnran123_State));
cudaMemset((void*)s, 0, sizeof(nrnran123_State));
nrnran123_setup_cuda_newstream<<<1, 1>>>(s, id1, id2, id3);
cudaDeviceSynchronize();
return s;
}
/* nrn123 streams are destroyed from cpu launcher routine */
void nrnran123_deletestream(nrnran123_State* s) {
nrnran123_cuda_deletestream<<<1, 1>>>(s);
cudaDeviceSynchronize();
cudaFree(s);
}
/* set global index for random123 stream on gpu */
void nrnran123_set_gpu_globalindex(uint32_t gix) {
nrnran123_set_globalindex<<<1,1>>>(gix);
cudaDeviceSynchronize();
}
} //namespace coreneuron
|
6de124442c1a7068bfa5c9a73863bf7799754b0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "support.h"
__global__ void kernelT(int m, int n, int k, const float *A, const float *B, float* C)
{
const unsigned int BLOCK_SIZE = 32;
const unsigned int TILE_WIDTH = 32;
__shared__ float subTileA[TILE_WIDTH][TILE_WIDTH];
__shared__ float subTileB[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int Row = by * BLOCK_SIZE + ty;
int Col = bx * BLOCK_SIZE + tx;
float Cvalue = 0;
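// Walk the K dimension one 32x32 tile at a time: each iteration stages a tile
// of A and a tile of B in shared memory (zero-filling out-of-range elements)
// before accumulating the partial dot products.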
for (int i = 0; i < (k-1)/TILE_WIDTH + 1; i++)
{
if (Row < m &&(TILE_WIDTH*i + tx) < k)
subTileA[ty][tx] = A[Row*k + TILE_WIDTH*i+tx];
else
subTileA[ty][tx] = 0;
if (Col < n &&(TILE_WIDTH*i + ty) < k)
subTileB[ty][tx] = B[ty*n + TILE_WIDTH*n*i + Col];
else
subTileB[ty][tx] = 0;
__syncthreads();
if (Row < m && Col < n)
for (int j = 0; j < TILE_WIDTH; ++j)
Cvalue += subTileA[ty][j] * subTileB[j][tx];
__syncthreads();
}
if (Row < m && Col < n)
C[Row*n + Col] = Cvalue;
}
void midT(char transa, char transb, \
int m, int n, int k, \
float alpha, \
const float *A, int lda, \
const float *B, int ldb, \
float beta, \
float *C, int ldc)
{
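// BLAS-like sgemm wrapper: only the non-transposed case with alpha == 1 and
// beta == 0 is implemented; lda/ldb/ldc are accepted but not used.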
if ((transa != 'N') && (transa != 'n')) {
printf("unsupported value of 'transa'\n");
return;
}
if ((transb != 'N') && (transb != 'n')) {
printf("unsupported value of 'transb'\n");
return;
}
if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) {
printf("unsupported value of alpha\n");
return;
}
if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) {
printf("unsupported value of beta\n");
return;
}
const unsigned int BLOCK_SIZE = 32;
unsigned int grid_y = (unsigned int) ceil((double)m / (double)BLOCK_SIZE);
unsigned int grid_x = (unsigned int) ceil((double)n / (double)BLOCK_SIZE);
dim3 gridDim(grid_x, grid_y);
dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE);
hipLaunchKernelGGL(( kernelT), dim3(gridDim), dim3(blockDim), 0, 0, m, n, k, A, B, C);
}
int main (int argc, char *argv[])
{
Timer timer;
printf("\nRunning Tiled..."); fflush(stdout);
startTime(&timer);
float *A_h, *B_h, *C_h;
float *A_d, *B_d, *C_d;
size_t A_sz, B_sz, C_sz;
unsigned matArow, matAcol;
unsigned matBrow, matBcol;
dim3 dim_grid, dim_block;
if (argc == 1) {
matArow = 1000;
matAcol = matBrow = 1000;
matBcol = 1000;
} else if (argc == 2) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[1]);
matBcol = atoi(argv[1]);
} else if (argc == 4) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[2]);
matBcol = atoi(argv[3]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./sgemm # All matrices are 1000 x 1000"
"\n Usage: ./sgemm <m> # All matrices are m x m"
"\n Usage: ./sgemm <m> <k> <n> # A: m x k, B: k x n, C: m x n"
"\n");
exit(0);
}
A_sz = matArow*matAcol;
B_sz = matBrow*matBcol;
C_sz = matArow*matBcol;
A_h = (float*) malloc( sizeof(float)*A_sz );
for (unsigned int i=0; i < A_sz; i++) A_h[i] = (rand()%100)/100.00;
B_h = (float*) malloc( sizeof(float)*B_sz );
for (unsigned int i=0; i < B_sz; i++) B_h[i] = (rand()%100)/100.00;
C_h = (float*) malloc( sizeof(float)*C_sz );
printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n",
matArow, matAcol, matBrow, matBcol, matArow, matBcol);
hipMalloc((void **) &A_d, sizeof(float)*A_sz);
hipMalloc((void **) &B_d, sizeof(float)*B_sz);
hipMalloc((void **) &C_d, sizeof(float)*C_sz);
hipDeviceSynchronize();
hipMemcpy(A_d, A_h, sizeof(float)*A_sz, hipMemcpyHostToDevice);
hipMemcpy(B_d, B_h, sizeof(float)*B_sz, hipMemcpyHostToDevice);
hipDeviceSynchronize();
midT('N', 'N', matArow, matBcol, matBrow, 1.0f, \
A_d, matArow, B_d, matBrow, 0.0f, C_d, matBrow);
hipDeviceSynchronize();
hipMemcpy(C_h, C_d, sizeof(float)*C_sz, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, matArow, matAcol, matBcol);
free(A_h);
free(B_h);
free(C_h);
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
return 0;
} | 6de124442c1a7068bfa5c9a73863bf7799754b0d.cu | #include <stdio.h>
#include <stdlib.h>
#include "support.h"
__global__ void kernelT(int m, int n, int k, const float *A, const float *B, float* C)
{
const unsigned int BLOCK_SIZE = 32;
const unsigned int TILE_WIDTH = 32;
__shared__ float subTileA[TILE_WIDTH][TILE_WIDTH];
__shared__ float subTileB[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int Row = by * BLOCK_SIZE + ty;
int Col = bx * BLOCK_SIZE + tx;
float Cvalue = 0;
for (int i = 0; i < (k-1)/TILE_WIDTH + 1; i++)
{
if (Row < m &&(TILE_WIDTH*i + tx) < k)
subTileA[ty][tx] = A[Row*k + TILE_WIDTH*i+tx];
else
subTileA[ty][tx] = 0;
if (Col < n &&(TILE_WIDTH*i + ty) < k)
subTileB[ty][tx] = B[ty*n + TILE_WIDTH*n*i + Col];
else
subTileB[ty][tx] = 0;
__syncthreads();
if (Row < m && Col < n)
for (int j = 0; j < TILE_WIDTH; ++j)
Cvalue += subTileA[ty][j] * subTileB[j][tx];
__syncthreads();
}
if (Row < m && Col < n)
C[Row*n + Col] = Cvalue;
}
void midT(char transa, char transb, \
int m, int n, int k, \
float alpha, \
const float *A, int lda, \
const float *B, int ldb, \
float beta, \
float *C, int ldc)
{
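// sgemm-like interface around kernelT: computes C = A * B for row-major float
// matrices (A is m x k, B is k x n, C is m x n). Only transa/transb == 'N',
// alpha == 1.0f and beta == 0.0f are supported; lda, ldb and ldc are ignored.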
if ((transa != 'N') && (transa != 'n')) {
printf("unsupported value of 'transa'\n");
return;
}
if ((transb != 'N') && (transb != 'n')) {
printf("unsupported value of 'transb'\n");
return;
}
if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) {
printf("unsupported value of alpha\n");
return;
}
if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) {
printf("unsupported value of beta\n");
return;
}
const unsigned int BLOCK_SIZE = 32;
unsigned int grid_y = (unsigned int) ceil((double)m / (double)BLOCK_SIZE);
unsigned int grid_x = (unsigned int) ceil((double)n / (double)BLOCK_SIZE);
dim3 gridDim(grid_x, grid_y);
dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE);
kernelT<<<gridDim, blockDim>>>(m, n, k, A, B, C);
}
int main (int argc, char *argv[])
{
Timer timer;
printf("\nRunning Tiled..."); fflush(stdout);
startTime(&timer);
float *A_h, *B_h, *C_h;
float *A_d, *B_d, *C_d;
size_t A_sz, B_sz, C_sz;
unsigned matArow, matAcol;
unsigned matBrow, matBcol;
dim3 dim_grid, dim_block;
if (argc == 1) {
matArow = 1000;
matAcol = matBrow = 1000;
matBcol = 1000;
} else if (argc == 2) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[1]);
matBcol = atoi(argv[1]);
} else if (argc == 4) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[2]);
matBcol = atoi(argv[3]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./sgemm # All matrices are 1000 x 1000"
"\n Usage: ./sgemm <m> # All matrices are m x m"
"\n Usage: ./sgemm <m> <k> <n> # A: m x k, B: k x n, C: m x n"
"\n");
exit(0);
}
A_sz = matArow*matAcol;
B_sz = matBrow*matBcol;
C_sz = matArow*matBcol;
A_h = (float*) malloc( sizeof(float)*A_sz );
for (unsigned int i=0; i < A_sz; i++) A_h[i] = (rand()%100)/100.00;
B_h = (float*) malloc( sizeof(float)*B_sz );
for (unsigned int i=0; i < B_sz; i++) B_h[i] = (rand()%100)/100.00;
C_h = (float*) malloc( sizeof(float)*C_sz );
printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n",
matArow, matAcol, matBrow, matBcol, matArow, matBcol);
cudaMalloc((void **) &A_d, sizeof(float)*A_sz);
cudaMalloc((void **) &B_d, sizeof(float)*B_sz);
cudaMalloc((void **) &C_d, sizeof(float)*C_sz);
cudaDeviceSynchronize();
cudaMemcpy(A_d, A_h, sizeof(float)*A_sz, cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B_h, sizeof(float)*B_sz, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
midT('N', 'N', matArow, matBcol, matBrow, 1.0f, \
A_d, matArow, B_d, matBrow, 0.0f, C_d, matBrow);
cudaDeviceSynchronize();
cudaMemcpy(C_h, C_d, sizeof(float)*C_sz, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, matArow, matAcol, matBcol);
free(A_h);
free(B_h);
free(C_h);
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
return 0;
} |
a0e7fdb9d4fa3b86e84700e529187ee7fdd8faaf.hip | // !!! This is a file automatically generated by hipify!!!
//#include <stdio.h>
//#include <stdlib.h>
//#include <time.h>
//
//#include "common.h"
//#include "cuda_common.cuh"
//
//#include "hip/hip_runtime.h"
//#include "hip/hip_runtime.h"
//#include "device_launch_parameters.h"
//
//#define BLOCK_SIZE 1024
//
//template<unsigned int iblock_size>
//__global__ void reduction_gmem_benchmark(int * input, int * temp, int size)
//{
// int tid = threadIdx.x;
// int * i_data = input + blockDim.x * blockIdx.x;
//
// //manual unrolling depending on block size
// if (iblock_size >= 1024 && tid < 512)
// i_data[tid] += i_data[tid + 512];
//
// __syncthreads();
//
// if (iblock_size >= 512 && tid < 256)
// i_data[tid] += i_data[tid + 256];
//
// __syncthreads();
//
// if (iblock_size >= 256 && tid < 128)
// i_data[tid] += i_data[tid + 128];
//
// __syncthreads();
//
// if (iblock_size >= 128 && tid < 64)
// i_data[tid] += i_data[tid + 64];
//
// __syncthreads();
//
// //unrolling warp
// if (tid < 32)
// {
// volatile int * vsmem = i_data;
// vsmem[tid] += vsmem[tid + 32];
// vsmem[tid] += vsmem[tid + 16];
// vsmem[tid] += vsmem[tid + 8];
// vsmem[tid] += vsmem[tid + 4];
// vsmem[tid] += vsmem[tid + 2];
// vsmem[tid] += vsmem[tid + 1];
// }
//
// if (tid == 0)
// {
// temp[blockIdx.x] = i_data[0];
// }
//}
//
//template<unsigned int iblock_size>
//__global__ void reduction_smem(int * input, int * temp, int size)
//{
// __shared__ int smem[BLOCK_SIZE];
// int tid = threadIdx.x;
// int * i_data = input + blockDim.x * blockIdx.x;
//
// smem[tid] = i_data[tid];
//
// __syncthreads();
//
// // in-place reduction in shared memory
// if (blockDim.x >= 1024 && tid < 512)
// smem[tid] += smem[tid + 512];
// __syncthreads();
//
// if (blockDim.x >= 512 && tid < 256)
// smem[tid] += smem[tid + 256];
// __syncthreads();
//
// if (blockDim.x >= 256 && tid < 128)
// smem[tid] += smem[tid + 128];
// __syncthreads();
//
// if (blockDim.x >= 128 && tid < 64)
// smem[tid] += smem[tid + 64];
// __syncthreads();
//
// //unrolling warp
// if (tid < 32)
// {
// volatile int * vsmem = smem;
// vsmem[tid] += vsmem[tid + 32];
// vsmem[tid] += vsmem[tid + 16];
// vsmem[tid] += vsmem[tid + 8];
// vsmem[tid] += vsmem[tid + 4];
// vsmem[tid] += vsmem[tid + 2];
// vsmem[tid] += vsmem[tid + 1];
// }
//
// if (tid == 0)
// {
// temp[blockIdx.x] = smem[0];
// }
//}
//
//template<unsigned int iblock_size>
//__global__ void reduction_smem_complete_unroll(int * input, int * temp, int size)
//{
// __shared__ int smem[BLOCK_SIZE];
// // set thread ID
// unsigned int tid = threadIdx.x;
// // global index, 4 blocks of input data processed at a time
// unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
// // unrolling 4 blocks
// int tmpSum = 0;
// // boundary check
// if (idx + 3 * blockDim.x <= size)
// {
// int a1 = input[idx];
// int a2 = input[idx + blockDim.x];
// int a3 = input[idx + 2 * blockDim.x];
// int a4 = input[idx + 3 * blockDim.x];
// tmpSum = a1 + a2 + a3 + a4;
// }
//
// smem[tid] = tmpSum;
//
// __syncthreads();
//
// // in-place reduction in shared memory
// if (blockDim.x >= 1024 && tid < 512)
// smem[tid] += smem[tid + 512];
// __syncthreads();
//
// if (blockDim.x >= 512 && tid < 256)
// smem[tid] += smem[tid + 256];
// __syncthreads();
//
// if (blockDim.x >= 256 && tid < 128)
// smem[tid] += smem[tid + 128];
// __syncthreads();
//
// if (blockDim.x >= 128 && tid < 64)
// smem[tid] += smem[tid + 64];
// __syncthreads();
//
// //unrolling warp
// if (tid < 32)
// {
// volatile int * vsmem = smem;
// vsmem[tid] += vsmem[tid + 32];
// vsmem[tid] += vsmem[tid + 16];
// vsmem[tid] += vsmem[tid + 8];
// vsmem[tid] += vsmem[tid + 4];
// vsmem[tid] += vsmem[tid + 2];
// vsmem[tid] += vsmem[tid + 1];
// }
//
// if (tid == 0)
// {
// temp[blockIdx.x] = smem[0];
// }
//}
//
//int main(int argc, char ** argv)
//{
// printf("Running parallel reduction with complete unrolling kernel \n");
//
// int kernel_index = 0;
//
// if (argc >1)
// {
// kernel_index = 1;
// }
//
// int size = 1 << 22;
// int byte_size = size * sizeof(int);
// int block_size = BLOCK_SIZE;
//
// int * h_input, *h_ref;
// h_input = (int*)malloc(byte_size);
//
// initialize(h_input, size);
//
// int cpu_result = reduction_cpu(h_input, size);
//
// dim3 block(block_size);
// dim3 grid((size / block_size));
//
// int temp_array_byte_size = sizeof(int)* grid.x;
//
// h_ref = (int*)malloc(temp_array_byte_size);
//
// int * d_input, *d_temp;
// gpuErrchk(hipMalloc((void**)&d_input, byte_size));
// gpuErrchk(hipMalloc((void**)&d_temp, temp_array_byte_size));
//
// gpuErrchk(hipMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(hipMemcpy(d_input, h_input, byte_size,
// hipMemcpyHostToDevice));
// if (kernel_index == 0)
// {
// printf("Kernel launch parameters || grid : %d, block : %d \n", grid.x, block.x);
//
// switch (block_size)
// {
// case 1024:
// reduction_smem_complete_unroll <1024> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 512:
// reduction_smem_complete_unroll <512> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 256:
// reduction_smem_complete_unroll <256> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 128:
// reduction_smem_complete_unroll <128> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 64:
// reduction_smem_complete_unroll <64> << < grid, block >> > (d_input, d_temp, size);
// break;
// }
// }
// else if (kernel_index == 1)
// {
// printf("Kernel launch parameters || grid : %d, block : %d \n", grid.x, block.x);
//
// switch (block_size)
// {
// case 1024:
// reduction_smem <1024> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 512:
// reduction_smem <512> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 256:
// reduction_smem <256> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 128:
// reduction_smem <128> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 64:
// reduction_smem <64> << < grid, block >> > (d_input, d_temp, size);
// break;
// }
// }
// else
// {
// grid.x = grid.x / 4;
// printf("Kernel launch parameters || grid : %d, block : %d \n", grid.x, block.x);
//
// switch (block_size)
// {
// case 1024:
// reduction_smem_complete_unroll <1024> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 512:
// reduction_smem_complete_unroll <512> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 256:
// reduction_smem_complete_unroll <256> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 128:
// reduction_smem_complete_unroll <128> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 64:
// reduction_smem_complete_unroll <64> << < grid, block >> > (d_input, d_temp, size);
// break;
// }
// }
// gpuErrchk(hipDeviceSynchronize());
// gpuErrchk(hipMemcpy(h_ref, d_temp, temp_array_byte_size, hipMemcpyDeviceToHost));
//
// int gpu_result = 0;
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
//
// compare_results(gpu_result, cpu_result);
//
// gpuErrchk(hipFree(d_input));
// gpuErrchk(hipFree(d_temp));
// free(h_input);
// free(h_ref);
//
// gpuErrchk(hipDeviceReset());
// return 0;
//} | a0e7fdb9d4fa3b86e84700e529187ee7fdd8faaf.cu | //#include <stdio.h>
//#include <stdlib.h>
//#include <time.h>
//
//#include "common.h"
//#include "cuda_common.cuh"
//
//#include "cuda.h"
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#define BLOCK_SIZE 1024
//
//template<unsigned int iblock_size>
//__global__ void reduction_gmem_benchmark(int * input, int * temp, int size)
//{
// int tid = threadIdx.x;
// int * i_data = input + blockDim.x * blockIdx.x;
//
// //manual unrolling depending on block size
// if (iblock_size >= 1024 && tid < 512)
// i_data[tid] += i_data[tid + 512];
//
// __syncthreads();
//
// if (iblock_size >= 512 && tid < 256)
// i_data[tid] += i_data[tid + 256];
//
// __syncthreads();
//
// if (iblock_size >= 256 && tid < 128)
// i_data[tid] += i_data[tid + 128];
//
// __syncthreads();
//
// if (iblock_size >= 128 && tid < 64)
// i_data[tid] += i_data[tid + 64];
//
// __syncthreads();
//
// //unrolling warp
// if (tid < 32)
// {
// volatile int * vsmem = i_data;
// vsmem[tid] += vsmem[tid + 32];
// vsmem[tid] += vsmem[tid + 16];
// vsmem[tid] += vsmem[tid + 8];
// vsmem[tid] += vsmem[tid + 4];
// vsmem[tid] += vsmem[tid + 2];
// vsmem[tid] += vsmem[tid + 1];
// }
//
// if (tid == 0)
// {
// temp[blockIdx.x] = i_data[0];
// }
//}
//
//template<unsigned int iblock_size>
//__global__ void reduction_smem(int * input, int * temp, int size)
//{
// __shared__ int smem[BLOCK_SIZE];
// int tid = threadIdx.x;
// int * i_data = input + blockDim.x * blockIdx.x;
//
// smem[tid] = i_data[tid];
//
// __syncthreads();
//
// // in-place reduction in shared memory
// if (blockDim.x >= 1024 && tid < 512)
// smem[tid] += smem[tid + 512];
// __syncthreads();
//
// if (blockDim.x >= 512 && tid < 256)
// smem[tid] += smem[tid + 256];
// __syncthreads();
//
// if (blockDim.x >= 256 && tid < 128)
// smem[tid] += smem[tid + 128];
// __syncthreads();
//
// if (blockDim.x >= 128 && tid < 64)
// smem[tid] += smem[tid + 64];
// __syncthreads();
//
// //unrolling warp
// if (tid < 32)
// {
// volatile int * vsmem = smem;
// vsmem[tid] += vsmem[tid + 32];
// vsmem[tid] += vsmem[tid + 16];
// vsmem[tid] += vsmem[tid + 8];
// vsmem[tid] += vsmem[tid + 4];
// vsmem[tid] += vsmem[tid + 2];
// vsmem[tid] += vsmem[tid + 1];
// }
//
// if (tid == 0)
// {
// temp[blockIdx.x] = smem[0];
// }
//}
//
//template<unsigned int iblock_size>
//__global__ void reduction_smem_complete_unroll(int * input, int * temp, int size)
//{
// __shared__ int smem[BLOCK_SIZE];
// // set thread ID
// unsigned int tid = threadIdx.x;
// // global index, 4 blocks of input data processed at a time
// unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
// // unrolling 4 blocks
// int tmpSum = 0;
// // boundary check
// if (idx + 3 * blockDim.x <= size)
// {
// int a1 = input[idx];
// int a2 = input[idx + blockDim.x];
// int a3 = input[idx + 2 * blockDim.x];
// int a4 = input[idx + 3 * blockDim.x];
// tmpSum = a1 + a2 + a3 + a4;
// }
//
// smem[tid] = tmpSum;
//
// __syncthreads();
//
// // in-place reduction in shared memory
// if (blockDim.x >= 1024 && tid < 512)
// smem[tid] += smem[tid + 512];
// __syncthreads();
//
// if (blockDim.x >= 512 && tid < 256)
// smem[tid] += smem[tid + 256];
// __syncthreads();
//
// if (blockDim.x >= 256 && tid < 128)
// smem[tid] += smem[tid + 128];
// __syncthreads();
//
// if (blockDim.x >= 128 && tid < 64)
// smem[tid] += smem[tid + 64];
// __syncthreads();
//
// //unrolling warp
// if (tid < 32)
// {
// volatile int * vsmem = smem;
// vsmem[tid] += vsmem[tid + 32];
// vsmem[tid] += vsmem[tid + 16];
// vsmem[tid] += vsmem[tid + 8];
// vsmem[tid] += vsmem[tid + 4];
// vsmem[tid] += vsmem[tid + 2];
// vsmem[tid] += vsmem[tid + 1];
// }
//
// if (tid == 0)
// {
// temp[blockIdx.x] = smem[0];
// }
//}
//
//int main(int argc, char ** argv)
//{
// printf("Running parallel reduction with complete unrolling kernel \n");
//
// int kernel_index = 0;
//
// if (argc >1)
// {
// kernel_index = 1;
// }
//
// int size = 1 << 22;
// int byte_size = size * sizeof(int);
// int block_size = BLOCK_SIZE;
//
// int * h_input, *h_ref;
// h_input = (int*)malloc(byte_size);
//
// initialize(h_input, size);
//
// int cpu_result = reduction_cpu(h_input, size);
//
// dim3 block(block_size);
// dim3 grid((size / block_size));
//
// int temp_array_byte_size = sizeof(int)* grid.x;
//
// h_ref = (int*)malloc(temp_array_byte_size);
//
// int * d_input, *d_temp;
// gpuErrchk(cudaMalloc((void**)&d_input, byte_size));
// gpuErrchk(cudaMalloc((void**)&d_temp, temp_array_byte_size));
//
// gpuErrchk(cudaMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(cudaMemcpy(d_input, h_input, byte_size,
// cudaMemcpyHostToDevice));
// if (kernel_index == 0)
// {
// printf("Kernel launch parameters || grid : %d, block : %d \n", grid.x, block.x);
//
// switch (block_size)
// {
// case 1024:
// reduction_smem_complete_unroll <1024> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 512:
// reduction_smem_complete_unroll <512> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 256:
// reduction_smem_complete_unroll <256> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 128:
// reduction_smem_complete_unroll <128> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 64:
// reduction_smem_complete_unroll <64> << < grid, block >> > (d_input, d_temp, size);
// break;
// }
// }
// else if (kernel_index == 1)
// {
// printf("Kernel launch parameters || grid : %d, block : %d \n", grid.x, block.x);
//
// switch (block_size)
// {
// case 1024:
// reduction_smem <1024> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 512:
// reduction_smem <512> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 256:
// reduction_smem <256> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 128:
// reduction_smem <128> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 64:
// reduction_smem <64> << < grid, block >> > (d_input, d_temp, size);
// break;
// }
// }
// else
// {
// grid.x = grid.x / 4;
// printf("Kernel launch parameters || grid : %d, block : %d \n", grid.x, block.x);
//
// switch (block_size)
// {
// case 1024:
// reduction_smem_complete_unroll <1024> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 512:
// reduction_smem_complete_unroll <512> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 256:
// reduction_smem_complete_unroll <256> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 128:
// reduction_smem_complete_unroll <128> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 64:
// reduction_smem_complete_unroll <64> << < grid, block >> > (d_input, d_temp, size);
// break;
// }
// }
// gpuErrchk(cudaDeviceSynchronize());
// gpuErrchk(cudaMemcpy(h_ref, d_temp, temp_array_byte_size, cudaMemcpyDeviceToHost));
//
// int gpu_result = 0;
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
//
// compare_results(gpu_result, cpu_result);
//
// gpuErrchk(cudaFree(d_input));
// gpuErrchk(cudaFree(d_temp));
// free(h_input);
// free(h_ref);
//
// gpuErrchk(cudaDeviceReset());
// return 0;
//} |
037ba6309277252ef4ef4298ea01e9f7b0ce5e28.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
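// For each 2-D point j of xyz (x/y interleaved), scan xyz2 in shared-memory tiles
// of `batch` points; result[j] and result_i[j] track the smallest squared distance
// found so far and the index of the corresponding xyz2 point.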
const int batch=512;
__shared__ float buf[batch*2];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;
for (int j=threadIdx.x;j<end_k*2;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*2+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz[(i*n+j)*2+0];
float y1=xyz[(i*n+j)*2+1];
int best_i=0;
float best=0;
int end_ka=end_k-(end_k&3); // round end_k down to a multiple of 4 for the unrolled loop below
if (end_ka==batch){
for (int k=0;k<batch;k+=4){
{
float x2=buf[k*2+0]-x1;
float y2=buf[k*2+1]-y1;
float d=x2*x2+y2*y2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*2+2]-x1;
float y2=buf[k*2+3]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*2+4]-x1;
float y2=buf[k*2+5]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*2+6]-x1;
float y2=buf[k*2+7]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
for (int k=0;k<end_ka;k+=4){
{
float x2=buf[k*2+0]-x1;
float y2=buf[k*2+1]-y1;
float d=x2*x2+y2*y2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*2+2]-x1;
float y2=buf[k*2+3]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*2+4]-x1;
float y2=buf[k*2+5]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*2+6]-x1;
float y2=buf[k*2+7]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
for (int k=end_ka;k<end_k;k++){
float x2=buf[k*2+0]-x1;
float y2=buf[k*2+1]-y1;
float d=x2*x2+y2*y2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
// int chamfer_cuda_forward(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i, hipStream_t stream){
int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2){
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, batch_size, n, xyz1.data<float>(), m, xyz2.data<float>(), dist1.data<float>(), idx1.data<int>());
hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, batch_size, m, xyz2.data<float>(), n, xyz1.data<float>(), dist2.data<float>(), idx2.data<int>());
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd updateOutput: %s\n", hipGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz1[(i*n+j)*2+0];
float y1=xyz1[(i*n+j)*2+1];
int j2=idx1[i*n+j];
float x2=xyz2[(i*m+j2)*2+0];
float y2=xyz2[(i*m+j2)*2+1];
float g=grad_dist1[i*n+j]*2;
atomicAdd(&(grad_xyz1[(i*n+j)*2+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*2+1]),g*(y1-y2));
atomicAdd(&(grad_xyz2[(i*m+j2)*2+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*2+1]),-(g*(y1-y2)));
}
}
}
// int chamfer_cuda_backward(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2, hipStream_t stream){
int chamfer_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2){
// hipMemset(grad_xyz1,0,b*n*3*4);
// hipMemset(grad_xyz2,0,b*m*3*4);
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, batch_size,n,xyz1.data<float>(),m,xyz2.data<float>(),graddist1.data<float>(),idx1.data<int>(),gradxyz1.data<float>(),gradxyz2.data<float>());
hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, batch_size,m,xyz2.data<float>(),n,xyz1.data<float>(),graddist2.data<float>(),idx2.data<int>(),gradxyz2.data<float>(),gradxyz1.data<float>());
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd get grad: %s\n", hipGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
| 037ba6309277252ef4ef4298ea01e9f7b0ce5e28.cu |
#include <stdio.h>
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
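// For each 2-D point j of xyz (x/y interleaved), scan xyz2 in shared-memory tiles
// of `batch` points; result[j] and result_i[j] track the smallest squared distance
// found so far and the index of the corresponding xyz2 point.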
const int batch=512;
__shared__ float buf[batch*2];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;
for (int j=threadIdx.x;j<end_k*2;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*2+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz[(i*n+j)*2+0];
float y1=xyz[(i*n+j)*2+1];
int best_i=0;
float best=0;
int end_ka=end_k-(end_k&3); // round end_k down to a multiple of 4 for the unrolled loop below
if (end_ka==batch){
for (int k=0;k<batch;k+=4){
{
float x2=buf[k*2+0]-x1;
float y2=buf[k*2+1]-y1;
float d=x2*x2+y2*y2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*2+2]-x1;
float y2=buf[k*2+3]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*2+4]-x1;
float y2=buf[k*2+5]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*2+6]-x1;
float y2=buf[k*2+7]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
for (int k=0;k<end_ka;k+=4){
{
float x2=buf[k*2+0]-x1;
float y2=buf[k*2+1]-y1;
float d=x2*x2+y2*y2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*2+2]-x1;
float y2=buf[k*2+3]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*2+4]-x1;
float y2=buf[k*2+5]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*2+6]-x1;
float y2=buf[k*2+7]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
for (int k=end_ka;k<end_k;k++){
float x2=buf[k*2+0]-x1;
float y2=buf[k*2+1]-y1;
float d=x2*x2+y2*y2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
// int chamfer_cuda_forward(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i, cudaStream_t stream){
int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2){
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
NmDistanceKernel<<<dim3(32,16,1),512>>>(batch_size, n, xyz1.data<float>(), m, xyz2.data<float>(), dist1.data<float>(), idx1.data<int>());
NmDistanceKernel<<<dim3(32,16,1),512>>>(batch_size, m, xyz2.data<float>(), n, xyz1.data<float>(), dist2.data<float>(), idx2.data<int>());
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd updateOutput: %s\n", cudaGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz1[(i*n+j)*2+0];
float y1=xyz1[(i*n+j)*2+1];
int j2=idx1[i*n+j];
float x2=xyz2[(i*m+j2)*2+0];
float y2=xyz2[(i*m+j2)*2+1];
float g=grad_dist1[i*n+j]*2;
atomicAdd(&(grad_xyz1[(i*n+j)*2+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*2+1]),g*(y1-y2));
atomicAdd(&(grad_xyz2[(i*m+j2)*2+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*2+1]),-(g*(y1-y2)));
}
}
}
// int chamfer_cuda_backward(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2, cudaStream_t stream){
int chamfer_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2){
// cudaMemset(grad_xyz1,0,b*n*3*4);
// cudaMemset(grad_xyz2,0,b*m*3*4);
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
NmDistanceGradKernel<<<dim3(1,16,1),256>>>(batch_size,n,xyz1.data<float>(),m,xyz2.data<float>(),graddist1.data<float>(),idx1.data<int>(),gradxyz1.data<float>(),gradxyz2.data<float>());
NmDistanceGradKernel<<<dim3(1,16,1),256>>>(batch_size,m,xyz2.data<float>(),n,xyz1.data<float>(),graddist2.data<float>(),idx2.data<int>(),gradxyz2.data<float>(),gradxyz1.data<float>());
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd get grad: %s\n", cudaGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
|
e45422e9b1299e56bb3d149554476ec8d4fc99c0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <time.h>
#include <hip/hip_runtime.h>
void
sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny)
{
float *ia = A;
float *ib = B;
float *ic = C;
for (int i = 0; i < ny; i++)
{
for (int j = 0; j < nx; j++)
{
ic[j] = ia[j] + ib[j];
}
ia += nx;
ib += nx;
ic += nx;
}
}
__global__ void
sumMatrixOnGPU(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
unsigned int ix = threadIdx.x + (blockIdx.x * blockDim.x);
unsigned int iy = threadIdx.y + (blockIdx.y * blockDim.y);
unsigned int idx = ix + (iy * nx);
if (ix < nx && iy < ny) MatC[idx] = MatA[idx] + MatB[idx];
else printf("WHAT: ix >= nx || iy >= ny\n");
}
void devConfig(const int devId) {
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, devId);
printf("Using Device %d: %s\n", devId, devProp.name);
hipSetDevice(devId);
}
void
initalData(float *matrix, const int nxy)
{
for (int i = 0; i < nxy; i++)
matrix[i] = 0;
}
int
main(int argc, char *argv[])
{
devConfig(0);
int nx = 1 << 14;
int ny = 1 << 14;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float);
printf("Matrix size: nx %d ny %d\n", nx, ny);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
clock_t iStart = clock();
initalData(h_A, nxy);
initalData(h_B, nxy);
clock_t iEnd = clock();
// TODO : Store time
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
iStart = clock();
sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
iEnd = clock();
// TODO : Store time
// TODO : Alloc device global memory
// TODO : Transfer data to device
// TODO : Config and invoke kernel
// TODO : Sum matrix on GPU (TODO : Time it)
// TODO : Copy back computed GPU data
// TODO : Compare host and GPU
// TODO : Host and Device clean up
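// Minimal illustrative sketch of the TODO steps above, not a tuned implementation.
// Assumptions: d_A, d_B, d_C and the 1e-5 tolerance are introduced here only for
// illustration; timings are printed instead of stored, and HIP error codes are not checked.
float *d_A, *d_B, *d_C;
hipMalloc((void **) &d_A, nBytes);
hipMalloc((void **) &d_B, nBytes);
hipMalloc((void **) &d_C, nBytes);
hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);
dim_block = dim3(32, 32);
dim_grid = dim3((nx + dim_block.x - 1) / dim_block.x, (ny + dim_block.y - 1) / dim_block.y);
iStart = clock();
hipLaunchKernelGGL(( sumMatrixOnGPU), dim3(dim_grid), dim3(dim_block), 0, 0, d_A, d_B, d_C, nx, ny);
hipDeviceSynchronize();
iEnd = clock();
printf("sumMatrixOnGPU elapsed: %f s\n", (double)(iEnd - iStart) / CLOCKS_PER_SEC);
hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost);
int match = 1;
for (int i = 0; i < nxy; i++)
{
float diff = hostRef[i] - gpuRef[i];
if (diff > 1e-5f || diff < -1e-5f) { match = 0; break; }
}
printf(match ? "Host and GPU results match.\n" : "Host and GPU results differ!\n");
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);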
return (0);
}
| e45422e9b1299e56bb3d149554476ec8d4fc99c0.cu | #include <stdio.h>
#include <time.h>
#include <cuda_runtime.h>
void
sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny)
{
float *ia = A;
float *ib = B;
float *ic = C;
for (int i = 0; i < ny; i++)
{
for (int j = 0; j < nx; j++)
{
ic[j] = ia[j] + ib[j];
}
ia += nx;
ib += nx;
ic += nx;
}
}
__global__ void
sumMatrixOnGPU(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
unsigned int ix = threadIdx.x + (blockIdx.x * blockDim.x);
unsigned int iy = threadIdx.y + (blockIdx.y * blockDim.y);
unsigned int idx = ix + (iy * nx);
if (ix < nx && iy < ny) MatC[idx] = MatA[idx] + MatB[idx];
else printf("WHAT: ix >= nx || iy >= ny\n");
}
void devConfig(const int devId) {
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, devId);
printf("Using Device %d: %s\n", devId, devProp.name);
cudaSetDevice(devId);
}
void
initalData(float *matrix, const int nxy)
{
for (int i = 0; i < nxy; i++)
matrix[i] = 0;
}
int
main(int argc, char *argv[])
{
devConfig(0);
int nx = 1 << 14;
int ny = 1 << 14;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float);
printf("Matrix size: nx %d ny %d\n", nx, ny);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
clock_t iStart = clock();
initalData(h_A, nxy);
initalData(h_B, nxy);
clock_t iEnd = clock();
// TODO : Store time
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
iStart = clock();
sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
iEnd = clock();
// TODO : Store time
// TODO : Alloc device global memory
// TODO : Transfer data to device
// TODO : Config and invoke kernel
// TODO : Sum matrix on GPU (TODO : Time it)
// TODO : Copy back computed GPU data
// TODO : Compare host and GPU
// TODO : Host and Device clean up
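// Minimal illustrative sketch of the TODO steps above, not a tuned implementation.
// Assumptions: d_A, d_B, d_C and the 1e-5 tolerance are introduced here only for
// illustration; timings are printed instead of stored, and CUDA error codes are not checked.
float *d_A, *d_B, *d_C;
cudaMalloc((void **) &d_A, nBytes);
cudaMalloc((void **) &d_B, nBytes);
cudaMalloc((void **) &d_C, nBytes);
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
dim_block = dim3(32, 32);
dim_grid = dim3((nx + dim_block.x - 1) / dim_block.x, (ny + dim_block.y - 1) / dim_block.y);
iStart = clock();
sumMatrixOnGPU<<<dim_grid, dim_block>>>(d_A, d_B, d_C, nx, ny);
cudaDeviceSynchronize();
iEnd = clock();
printf("sumMatrixOnGPU elapsed: %f s\n", (double)(iEnd - iStart) / CLOCKS_PER_SEC);
cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
int match = 1;
for (int i = 0; i < nxy; i++)
{
float diff = hostRef[i] - gpuRef[i];
if (diff > 1e-5f || diff < -1e-5f) { match = 0; break; }
}
printf(match ? "Host and GPU results match.\n" : "Host and GPU results differ!\n");
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);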
return (0);
}
|
47fb4d11439ce861ed4cde1b1f03dc6b831d7416.hip | // !!! This is a file automatically generated by hipify!!!
#include "helper.h"
// SNAP TRANSFORM SIZE
// transforms data size, use in convolution FFT 2D
int snapTransformSize(int dataSize){
int hiBit;
unsigned int lowPOT, hiPOT;
dataSize = iAlignUp(dataSize, 16);
for (hiBit = 31; hiBit >= 0; hiBit--){
if (dataSize & (1U << hiBit)){
break;
}
}
lowPOT = 1U << hiBit;
if (lowPOT == (unsigned int)dataSize){
return dataSize;
}
hiPOT = 1U << (hiBit + 1);
if (hiPOT <= 1024){
return hiPOT;
}else{
return iAlignUp(dataSize, 512);
}
}
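// Illustrative values, assuming iAlignUp(a, b) rounds a up to the next multiple of b
// (as in the CUDA convolutionFFT2D sample this helper mirrors):
// snapTransformSize(100) -> aligned to 112 -> next power of two 128
// snapTransformSize(600) -> aligned to 608 -> next power of two 1024
// snapTransformSize(1200) -> already 16-aligned, next power of two 2048 > 1024,
// so the fallback iAlignUp(1200, 512) = 1536 is returned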
// COPY VECTOR
// DEEP copy between vectors of pointers
void copyVector(const std::vector<std::vector<Mat*> >& _from, std::vector<std::vector<Mat*> >& _to){
releaseVector(_to);
_to.clear();
_to.resize(_from.size());
for(int i = 0; i < _to.size(); ++i){
_to[i].clear();
_to[i].resize(_from[i].size());
for(int j = 0; j < _to[i].size(); ++j){
_to[i][j] = new Mat();
_from[i][j] -> copyTo(*(_to[i][j]));
}
}
}
void copyVector(const std::vector<Mat*>& _from, std::vector<Mat*>& _to){
releaseVector(_to);
_to.clear();
_to.resize(_from.size());
for(int i = 0; i < _to.size(); ++i){
_to[i] = new Mat();
_from[i] -> copyTo(*(_to[i]));
}
}
void copyVector(const std::vector<std::vector<vector3f*> >& _from, std::vector<std::vector<vector3f*> >& _to){
releaseVector(_to);
_to.clear();
_to.resize(_from.size());
for(int i = 0; i < _to.size(); ++i){
_to[i].clear();
_to[i].resize(_from[i].size());
for(int j = 0; j < _to[i].size(); ++j){
_to[i][j] = new vector3f();
_from[i][j] -> copyTo(*(_to[i][j]));
}
}
}
void copyVector(const std::vector<vector3f*>& _from, std::vector<vector3f*>& _to){
releaseVector(_to);
_to.clear();
_to.resize(_from.size());
for(int i = 0; i < _to.size(); ++i){
_to[i] = new vector3f();
_from[i] -> copyTo(*(_to[i]));
}
}
// RELEASE VECTOR
// release vector of Mat, vector3f, cpuMat, or Mat*, vector3f*, cpuMat*. (first make sure the pointer is not NULL before you free it)
void releaseVector(std::vector<std::vector<Mat*> >& vec){
for(int i = 0; i < vec.size(); ++i){
for(int j = 0; j < vec[i].size(); ++j){
if(vec[i][j]) vec[i][j] -> release();
}
}
}
void releaseVector(std::vector<std::vector<Mat> >& vec){
for(int i = 0; i < vec.size(); ++i){
for(int j = 0; j < vec[i].size(); ++j){
vec[i][j].release();
}
}
}
void releaseVector(std::vector<Mat*>& vec){
for(int i = 0; i < vec.size(); ++i){
if(vec[i]) vec[i] -> release();
}
}
void releaseVector(std::vector<Mat>& vec){
for(int i = 0; i < vec.size(); ++i){
vec[i].release();
}
}
void releaseVector(std::vector<std::vector<cpuMat*> >& vec){
for(int i = 0; i < vec.size(); ++i){
for(int j = 0; j < vec[i].size(); ++j){
if(vec[i][j]) vec[i][j] -> release();
}
}
}
void releaseVector(std::vector<std::vector<cpuMat> >& vec){
for(int i = 0; i < vec.size(); ++i){
for(int j = 0; j < vec[i].size(); ++j){
vec[i][j].release();
}
}
}
void releaseVector(std::vector<cpuMat*>& vec){
for(int i = 0; i < vec.size(); ++i){
if(vec[i]) vec[i] -> release();
}
}
void releaseVector(std::vector<cpuMat>& vec){
for(int i = 0; i < vec.size(); ++i){
vec[i].release();
}
}
void releaseVector(std::vector<std::vector<std::vector<vector3f*> > >& vec){
for(int i = 0; i < vec.size(); ++i){
for(int j = 0; j < vec[i].size(); ++j){
for(int k = 0; k < vec[i][j].size(); ++k){
if(vec[i][j][k]) vec[i][j][k] -> release();
}
}
}
}
void releaseVector(std::vector<std::vector<std::vector<vector3f> > >& vec){
for(int i = 0; i < vec.size(); ++i){
for(int j = 0; j < vec[i].size(); ++j){
for(int k = 0; k < vec[i][j].size(); ++k){
vec[i][j][k].release();
}
}
}
}
void releaseVector(std::vector<std::vector<vector3f*> >& vec){
for(int i = 0; i < vec.size(); ++i){
for(int j = 0; j < vec[i].size(); ++j){
if(vec[i][j]) vec[i][j] -> release();
}
}
}
void releaseVector(std::vector<std::vector<vector3f> >& vec){
for(int i = 0; i < vec.size(); ++i){
for(int j = 0; j < vec[i].size(); ++j){
vec[i][j].release();
}
}
}
void releaseVector(std::vector<vector3f*>& vec){
for(int i = 0; i < vec.size(); ++i){
if(vec[i]) vec[i] -> release();
}
}
void releaseVector(std::vector<vector3f>& vec){
for(int i = 0; i < vec.size(); ++i){
vec[i].release();
}
}
// SHOW GPU PROPERTY
// cuda function, shows gpu properties
void showGpuProperty(){
hipDeviceProp_t prop;
int count;
checkCudaErrors(hipGetDeviceCount(&count));
for(int i = 0; i < count; ++i){
hipGetDeviceProperties(&prop, i);
cout<<"--- general information for device "<<i<<endl;
cout<<"name: "<<prop.name<<endl;
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap) printf( "Enabled\n" );
else printf( "Disabled\n" );
printf( "Kernel execition timeout : " );
if (prop.kernelExecTimeoutEnabled) printf( "Enabled\n" );
else printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n", prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] );
printf( "\n" );
}
}
| 47fb4d11439ce861ed4cde1b1f03dc6b831d7416.cu | #include "helper.h"
// SNAP TRANSFORM SIZE
// transforms data size, use in convolution FFT 2D
int snapTransformSize(int dataSize){
int hiBit;
unsigned int lowPOT, hiPOT;
dataSize = iAlignUp(dataSize, 16);
for (hiBit = 31; hiBit >= 0; hiBit--){
if (dataSize & (1U << hiBit)){
break;
}
}
lowPOT = 1U << hiBit;
if (lowPOT == (unsigned int)dataSize){
return dataSize;
}
hiPOT = 1U << (hiBit + 1);
if (hiPOT <= 1024){
return hiPOT;
}else{
return iAlignUp(dataSize, 512);
}
}
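// Illustrative values, assuming iAlignUp(a, b) rounds a up to the next multiple of b
// (as in the CUDA convolutionFFT2D sample this helper mirrors):
// snapTransformSize(100) -> aligned to 112 -> next power of two 128
// snapTransformSize(600) -> aligned to 608 -> next power of two 1024
// snapTransformSize(1200) -> already 16-aligned, next power of two 2048 > 1024,
// so the fallback iAlignUp(1200, 512) = 1536 is returned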
// COPY VECTOR
// DEEP copy between vectors of pointers
void copyVector(const std::vector<std::vector<Mat*> >& _from, std::vector<std::vector<Mat*> >& _to){
releaseVector(_to);
_to.clear();
_to.resize(_from.size());
for(int i = 0; i < _to.size(); ++i){
_to[i].clear();
_to[i].resize(_from[i].size());
for(int j = 0; j < _to[i].size(); ++j){
_to[i][j] = new Mat();
_from[i][j] -> copyTo(*(_to[i][j]));
}
}
}
void copyVector(const std::vector<Mat*>& _from, std::vector<Mat*>& _to){
releaseVector(_to);
_to.clear();
_to.resize(_from.size());
for(int i = 0; i < _to.size(); ++i){
_to[i] = new Mat();
_from[i] -> copyTo(*(_to[i]));
}
}
void copyVector(const std::vector<std::vector<vector3f*> >& _from, std::vector<std::vector<vector3f*> >& _to){
releaseVector(_to);
_to.clear();
_to.resize(_from.size());
for(int i = 0; i < _to.size(); ++i){
_to[i].clear();
_to[i].resize(_from[i].size());
for(int j = 0; j < _to[i].size(); ++j){
_to[i][j] = new vector3f();
_from[i][j] -> copyTo(*(_to[i][j]));
}
}
}
void copyVector(const std::vector<vector3f*>& _from, std::vector<vector3f*>& _to){
releaseVector(_to);
_to.clear();
_to.resize(_from.size());
for(int i = 0; i < _to.size(); ++i){
_to[i] = new vector3f();
_from[i] -> copyTo(*(_to[i]));
}
}
// RELEASE VECTOR
// release vector of Mat, vector3f, cpuMat, or Mat*, vector3f*, cpuMat*. (first make sure the pointer is not NULL before you free it)
void releaseVector(std::vector<std::vector<Mat*> >& vec){
for(int i = 0; i < vec.size(); ++i){
for(int j = 0; j < vec[i].size(); ++j){
if(vec[i][j]) vec[i][j] -> release();
}
}
}
void releaseVector(std::vector<std::vector<Mat> >& vec){
for(int i = 0; i < vec.size(); ++i){
for(int j = 0; j < vec[i].size(); ++j){
vec[i][j].release();
}
}
}
void releaseVector(std::vector<Mat*>& vec){
for(int i = 0; i < vec.size(); ++i){
if(vec[i]) vec[i] -> release();
}
}
void releaseVector(std::vector<Mat>& vec){
for(int i = 0; i < vec.size(); ++i){
vec[i].release();
}
}
void releaseVector(std::vector<std::vector<cpuMat*> >& vec){
for(int i = 0; i < vec.size(); ++i){
for(int j = 0; j < vec[i].size(); ++j){
if(vec[i][j]) vec[i][j] -> release();
}
}
}
void releaseVector(std::vector<std::vector<cpuMat> >& vec){
for(int i = 0; i < vec.size(); ++i){
for(int j = 0; j < vec[i].size(); ++j){
vec[i][j].release();
}
}
}
void releaseVector(std::vector<cpuMat*>& vec){
for(int i = 0; i < vec.size(); ++i){
if(vec[i]) vec[i] -> release();
}
}
void releaseVector(std::vector<cpuMat>& vec){
for(int i = 0; i < vec.size(); ++i){
vec[i].release();
}
}
void releaseVector(std::vector<std::vector<std::vector<vector3f*> > >& vec){
for(int i = 0; i < vec.size(); ++i){
for(int j = 0; j < vec[i].size(); ++j){
for(int k = 0; k < vec[i][j].size(); ++k){
if(vec[i][j][k]) vec[i][j][k] -> release();
}
}
}
}
void releaseVector(std::vector<std::vector<std::vector<vector3f> > >& vec){
for(int i = 0; i < vec.size(); ++i){
for(int j = 0; j < vec[i].size(); ++j){
for(int k = 0; k < vec[i][j].size(); ++k){
vec[i][j][k].release();
}
}
}
}
void releaseVector(std::vector<std::vector<vector3f*> >& vec){
for(int i = 0; i < vec.size(); ++i){
for(int j = 0; j < vec[i].size(); ++j){
if(vec[i][j]) vec[i][j] -> release();
}
}
}
void releaseVector(std::vector<std::vector<vector3f> >& vec){
for(int i = 0; i < vec.size(); ++i){
for(int j = 0; j < vec[i].size(); ++j){
vec[i][j].release();
}
}
}
void releaseVector(std::vector<vector3f*>& vec){
for(int i = 0; i < vec.size(); ++i){
if(vec[i]) vec[i] -> release();
}
}
void releaseVector(std::vector<vector3f>& vec){
for(int i = 0; i < vec.size(); ++i){
vec[i].release();
}
}
// SHOW GPU PROPERTY
// cuda function, shows gpu properties
void showGpuProperty(){
cudaDeviceProp prop;
int count;
checkCudaErrors(cudaGetDeviceCount(&count));
for(int i = 0; i < count; ++i){
cudaGetDeviceProperties(&prop, i);
cout<<"--- general information for device "<<i<<endl;
cout<<"name: "<<prop.name<<endl;
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap) printf( "Enabled\n" );
else printf( "Disabled\n" );
printf( "Kernel execition timeout : " );
if (prop.kernelExecTimeoutEnabled) printf( "Enabled\n" );
else printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n", prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] );
printf( "\n" );
}
}
|
c98a00481a0a516d5079de373d59ceeb28b41492.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
static inline __device__ void atomicAdd(double *address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull;
unsigned long long int assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
}
#endif
namespace {
template <typename scalar_t>
__global__ void pspDist3d_cuda_forward_kernel(
const torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,size_t> pFeat,
const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> spFeat,
const torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,size_t> init_spIndx,
torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,size_t> sqdist,
int depth, int length, int height, int width, int Kl, int Kh, int Kw, int K) {
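// Each of the 27 output channels c corresponds to one superpixel in the 3x3x3
// (l, h, w) neighbourhood of the pixel's initial superpixel; the relative offset is
// (c / 9 - 1, (c % 9) / 3 - 1, (c % 9) % 3 - 1). sqdist[n][c][l][h][w] holds the
// squared feature distance to that superpixel, or 10000 when it falls off the superpixel grid.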
// indexing
const int n = blockIdx.y;
int d = blockIdx.x * blockDim.x + threadIdx.x;
const int HW = height * width;
const int LHW = length * HW;
const int c = d / LHW;
d %= LHW;
const int l = d / HW;
d %= HW;
const int h = d / width;
const int w = d % width;
const int init_spix_idx = static_cast<int>(init_spIndx[n][0][l][h][w]);
int spix_idx = init_spix_idx;
if (c < 27) {
// Convert spix_idx based on the association channel
const int rel_idx = c;
const int rel_idx_l = rel_idx / 9 - 1;
int rel_idx_h = (rel_idx % 9) / 3 - 1;
int rel_idx_w = (rel_idx % 9) % 3 - 1;
bool invalid_spixel = false;
const int Khw = Kh * Kw;
int spix_idx_l = init_spix_idx + rel_idx_l * Khw;
if (spix_idx_l >= K || spix_idx_l <= -1) {
spix_idx_l = init_spix_idx;
invalid_spixel = true;
}
if (((spix_idx_l + Kw) % Khw) == 0 && rel_idx_h == 1) {
rel_idx_h = 0;
invalid_spixel = true;
} else if ((spix_idx_l % Khw) == 0 && rel_idx_h == -1) {
rel_idx_h = 0;
invalid_spixel = true;
}
int spix_idx_h = spix_idx_l + rel_idx_h * Kw;
if (spix_idx_h >= K || spix_idx_h <= -1) {
spix_idx_h = spix_idx_l;
invalid_spixel = true;
}
if (((spix_idx_h + 1) % Kw) == 0 && rel_idx_w == 1) {
rel_idx_w = 0;
invalid_spixel = true;
} else if ((spix_idx_h % Kw) == 0 && rel_idx_w == -1) {
rel_idx_w = 0;
invalid_spixel = true;
}
int spix_idx_w = spix_idx_h + rel_idx_w;
if (spix_idx_w < K && spix_idx_w > -1) {
spix_idx = spix_idx_w;
} else {
spix_idx = spix_idx_h;
invalid_spixel = true;
}
// compute squared distance
scalar_t sq_dist = 0;
if (invalid_spixel) {
sq_dist = 10000.0;
} else {
for (int k = 0; k < depth; k++) {
sq_dist += pow(pFeat[n][k][l][h][w] - spFeat[n][k][spix_idx], 2);
}
}
sqdist[n][c][l][h][w] = sq_dist;
}
}
template <typename scalar_t>
__global__ void pspDist3d_cuda_backward_kernel(
const torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,size_t> grad_sqdist,
torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,size_t> grad_pFeat,
torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> grad_spFeat,
const torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,size_t> pFeat,
const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> spFeat,
const torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,size_t> init_spIndx,
int depth, int length, int height, int width, int Kl, int Kh, int Kw, int K) {
// indexing
const int n = blockIdx.y;
int d = blockIdx.x * blockDim.x + threadIdx.x;
const int HW = height * width;
const int LHW = length * HW;
const int c = d / LHW;
d %= LHW;
const int l = d / HW;
d %= HW;
const int h = d / width;
const int w = d % width;
const int init_spix_idx = static_cast<int>(init_spIndx[n][0][l][h][w]);
int spix_idx = init_spix_idx;
if (c < 27) {
// Convert spix_idx based on the association channel
const int rel_idx = c;
const int rel_idx_l = rel_idx / 9 - 1;
int rel_idx_h = (rel_idx % 9) / 3 - 1;
int rel_idx_w = (rel_idx % 9) % 3 - 1;
bool invalid_spixel = false;
const int Khw = Kh * Kw;
int spix_idx_l = init_spix_idx + rel_idx_l * Khw;
if (spix_idx_l >= K || spix_idx_l <= -1) {
spix_idx_l = init_spix_idx;
invalid_spixel = true;
}
if (((spix_idx_l + Kw) % Khw) == 0 && rel_idx_h == 1) {
rel_idx_h = 0;
invalid_spixel = true;
} else if ((spix_idx_l % Khw) == 0 && rel_idx_h == -1) {
rel_idx_h = 0;
invalid_spixel = true;
}
int spix_idx_h = spix_idx_l + rel_idx_h * Kw;
if (spix_idx_h >= K || spix_idx_h <= -1) {
spix_idx_h = spix_idx_l;
invalid_spixel = true;
}
if (((spix_idx_h + 1) % Kw) == 0 && rel_idx_w == 1) {
rel_idx_w = 0;
invalid_spixel = true;
} else if ((spix_idx_h % Kw) == 0 && rel_idx_w == -1) {
rel_idx_w = 0;
invalid_spixel = true;
}
int spix_idx_w = spix_idx_h + rel_idx_w;
if (spix_idx_w < K && spix_idx_w > -1) {
spix_idx = spix_idx_w;
} else {
spix_idx = spix_idx_h;
invalid_spixel = true;
}
//
if ( !invalid_spixel ) {
for (int k = 0; k < depth; k++) {
scalar_t _grad_pFeat = grad_sqdist[n][c][l][h][w] * 2 * (pFeat[n][k][l][h][w] - spFeat[n][k][spix_idx]);
atomicAdd(&grad_pFeat[n][k][l][h][w], _grad_pFeat);
atomicAdd(&grad_spFeat[n][k][spix_idx], -_grad_pFeat);
}
}
}
}
} // namespace
torch::Tensor pspDist3d_cuda_forward(
const torch::Tensor pFeat, // B C L H W
const torch::Tensor spFeat, // B C K
const torch::Tensor init_spIndx, // B 1 L H W
const int Kl,
const int Kh,
const int Kw) {
// setup
const auto batch_size = pFeat.size(0);
const auto depth = pFeat.size(1);
const auto length = pFeat.size(2);
const auto height = pFeat.size(3);
const auto width = pFeat.size(4);
const int K = Kl * Kh * Kw;
auto sqdist = torch::zeros({batch_size, 27, length, height, width},
torch::TensorOptions().dtype(pFeat.dtype()).device(pFeat.device()).requires_grad(true)); // B 27 L H W
// launch kernel
const int threads = 1024;
const dim3 blocks((27 * length * height * width + threads - 1) / threads, batch_size);
AT_DISPATCH_FLOATING_TYPES(pFeat.type(), "pspDist3d_forward_cuda", ([&] {
hipLaunchKernelGGL(( pspDist3d_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
pFeat.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,size_t>(),
spFeat.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
init_spIndx.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,size_t>(),
sqdist.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,size_t>(),
depth, length, height, width, Kl, Kh, Kw, K);
}));
return sqdist;
}
std::vector<torch::Tensor> pspDist3d_cuda_backward(
const torch::Tensor grad_sqdist, // B 27 L H W
const torch::Tensor pFeat, // B C L H W
const torch::Tensor spFeat, // B C K
const torch::Tensor init_spIndx, // B 1 L H W
const int Kl,
const int Kh,
const int Kw) {
// setup
const auto batch_size = pFeat.size(0);
const auto depth = pFeat.size(1);
const auto length = pFeat.size(2);
const auto height = pFeat.size(3);
const auto width = pFeat.size(4);
const int K = Kl * Kh * Kw;
auto grad_pFeat = torch::zeros_like(pFeat).set_requires_grad(false);
auto grad_spFeat = torch::zeros_like(spFeat).set_requires_grad(false);
// launch kernel
const int threads = 1024;
const dim3 blocks((27 * length * height * width + threads - 1) / threads, batch_size);
AT_DISPATCH_FLOATING_TYPES(spFeat.type(), "pspDist3d_backward_cuda", ([&] {
hipLaunchKernelGGL(( pspDist3d_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
grad_sqdist.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,size_t>(),
grad_pFeat.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,size_t>(),
grad_spFeat.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
pFeat.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,size_t>(),
spFeat.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
init_spIndx.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,size_t>(),
depth, length, height, width, Kl, Kh, Kw, K);
}));
return {grad_pFeat, grad_spFeat};
} | c98a00481a0a516d5079de373d59ceeb28b41492.cu | #include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
static inline __device__ void atomicAdd(double *address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull;
unsigned long long int assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
}
#endif
namespace {
template <typename scalar_t>
__global__ void pspDist3d_cuda_forward_kernel(
const torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,size_t> pFeat,
const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> spFeat,
const torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,size_t> init_spIndx,
torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,size_t> sqdist,
int depth, int length, int height, int width, int Kl, int Kh, int Kw, int K) {
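// Each of the 27 output channels c corresponds to one superpixel in the 3x3x3
// (l, h, w) neighbourhood of the pixel's initial superpixel; the relative offset is
// (c / 9 - 1, (c % 9) / 3 - 1, (c % 9) % 3 - 1). sqdist[n][c][l][h][w] holds the
// squared feature distance to that superpixel, or 10000 when it falls off the superpixel grid.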
// indexing
const int n = blockIdx.y;
int d = blockIdx.x * blockDim.x + threadIdx.x;
const int HW = height * width;
const int LHW = length * HW;
const int c = d / LHW;
d %= LHW;
const int l = d / HW;
d %= HW;
const int h = d / width;
const int w = d % width;
const int init_spix_idx = static_cast<int>(init_spIndx[n][0][l][h][w]);
int spix_idx = init_spix_idx;
if (c < 27) {
// Convert spix_idx based on the association channel
const int rel_idx = c;
const int rel_idx_l = rel_idx / 9 - 1;
int rel_idx_h = (rel_idx % 9) / 3 - 1;
int rel_idx_w = (rel_idx % 9) % 3 - 1;
bool invalid_spixel = false;
const int Khw = Kh * Kw;
int spix_idx_l = init_spix_idx + rel_idx_l * Khw;
if (spix_idx_l >= K || spix_idx_l <= -1) {
spix_idx_l = init_spix_idx;
invalid_spixel = true;
}
if (((spix_idx_l + Kw) % Khw) == 0 && rel_idx_h == 1) {
rel_idx_h = 0;
invalid_spixel = true;
} else if ((spix_idx_l % Khw) == 0 && rel_idx_h == -1) {
rel_idx_h = 0;
invalid_spixel = true;
}
int spix_idx_h = spix_idx_l + rel_idx_h * Kw;
if (spix_idx_h >= K || spix_idx_h <= -1) {
spix_idx_h = spix_idx_l;
invalid_spixel = true;
}
if (((spix_idx_h + 1) % Kw) == 0 && rel_idx_w == 1) {
rel_idx_w = 0;
invalid_spixel = true;
} else if ((spix_idx_h % Kw) == 0 && rel_idx_w == -1) {
rel_idx_w = 0;
invalid_spixel = true;
}
int spix_idx_w = spix_idx_h + rel_idx_w;
if (spix_idx_w < K && spix_idx_w > -1) {
spix_idx = spix_idx_w;
} else {
spix_idx = spix_idx_h;
invalid_spixel = true;
}
// compute squared distance
scalar_t sq_dist = 0;
if (invalid_spixel) {
sq_dist = 10000.0;
} else {
for (int k = 0; k < depth; k++) {
sq_dist += pow(pFeat[n][k][l][h][w] - spFeat[n][k][spix_idx], 2);
}
}
sqdist[n][c][l][h][w] = sq_dist;
}
}
template <typename scalar_t>
__global__ void pspDist3d_cuda_backward_kernel(
const torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,size_t> grad_sqdist,
torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,size_t> grad_pFeat,
torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> grad_spFeat,
const torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,size_t> pFeat,
const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> spFeat,
const torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,size_t> init_spIndx,
int depth, int length, int height, int width, int Kl, int Kh, int Kw, int K) {
// indexing
const int n = blockIdx.y;
int d = blockIdx.x * blockDim.x + threadIdx.x;
const int HW = height * width;
const int LHW = length * HW;
const int c = d / LHW;
d %= LHW;
const int l = d / HW;
d %= HW;
const int h = d / width;
const int w = d % width;
const int init_spix_idx = static_cast<int>(init_spIndx[n][0][l][h][w]);
int spix_idx = init_spix_idx;
if (c < 27) {
// Convert spix_idx based on the association channel
const int rel_idx = c;
const int rel_idx_l = rel_idx / 9 - 1;
int rel_idx_h = (rel_idx % 9) / 3 - 1;
int rel_idx_w = (rel_idx % 9) % 3 - 1;
bool invalid_spixel = false;
const int Khw = Kh * Kw;
int spix_idx_l = init_spix_idx + rel_idx_l * Khw;
if (spix_idx_l >= K || spix_idx_l <= -1) {
spix_idx_l = init_spix_idx;
invalid_spixel = true;
}
if (((spix_idx_l + Kw) % Khw) == 0 && rel_idx_h == 1) {
rel_idx_h = 0;
invalid_spixel = true;
} else if ((spix_idx_l % Khw) == 0 && rel_idx_h == -1) {
rel_idx_h = 0;
invalid_spixel = true;
}
int spix_idx_h = spix_idx_l + rel_idx_h * Kw;
if (spix_idx_h >= K || spix_idx_h <= -1) {
spix_idx_h = spix_idx_l;
invalid_spixel = true;
}
if (((spix_idx_h + 1) % Kw) == 0 && rel_idx_w == 1) {
rel_idx_w = 0;
invalid_spixel = true;
} else if ((spix_idx_h % Kw) == 0 && rel_idx_w == -1) {
rel_idx_w = 0;
invalid_spixel = true;
}
int spix_idx_w = spix_idx_h + rel_idx_w;
if (spix_idx_w < K && spix_idx_w > -1) {
spix_idx = spix_idx_w;
} else {
spix_idx = spix_idx_h;
invalid_spixel = true;
}
//
if ( !invalid_spixel ) {
for (int k = 0; k < depth; k++) {
scalar_t _grad_pFeat = grad_sqdist[n][c][l][h][w] * 2 * (pFeat[n][k][l][h][w] - spFeat[n][k][spix_idx]);
atomicAdd(&grad_pFeat[n][k][l][h][w], _grad_pFeat);
atomicAdd(&grad_spFeat[n][k][spix_idx], -_grad_pFeat);
}
}
}
}
} // namespace
torch::Tensor pspDist3d_cuda_forward(
const torch::Tensor pFeat, // B C L H W
const torch::Tensor spFeat, // B C K
const torch::Tensor init_spIndx, // B 1 L H W
const int Kl,
const int Kh,
const int Kw) {
// setup
const auto batch_size = pFeat.size(0);
const auto depth = pFeat.size(1);
const auto length = pFeat.size(2);
const auto height = pFeat.size(3);
const auto width = pFeat.size(4);
const int K = Kl * Kh * Kw;
auto sqdist = torch::zeros({batch_size, 27, length, height, width},
torch::TensorOptions().dtype(pFeat.dtype()).device(pFeat.device()).requires_grad(true)); // B 27 L H W
// launch kernel
const int threads = 1024;
const dim3 blocks((27 * length * height * width + threads - 1) / threads, batch_size);
AT_DISPATCH_FLOATING_TYPES(pFeat.type(), "pspDist3d_forward_cuda", ([&] {
pspDist3d_cuda_forward_kernel<scalar_t><<<blocks, threads>>>(
pFeat.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,size_t>(),
spFeat.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
init_spIndx.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,size_t>(),
sqdist.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,size_t>(),
depth, length, height, width, Kl, Kh, Kw, K);
}));
return sqdist;
}
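// Illustrative usage sketch (editorial addition, not part of the original source): one plausible
// way to call the forward wrapper above from C++. The helper name, tensor sizes and the use of
// torch::randn / torch::zeros are assumptions for illustration only; the shapes follow the
// B C L H W / B C K / B 1 L H W layout documented in the signatures.
static torch::Tensor pspDist3d_forward_example() {
    auto opts = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA);
    auto pFeat = torch::randn({2, 16, 4, 32, 32}, opts);      // B C L H W
    auto spFeat = torch::randn({2, 16, 3 * 4 * 4}, opts);     // B C K, with K = Kl*Kh*Kw
    auto init_spIndx = torch::zeros({2, 1, 4, 32, 32}, opts); // B 1 L H W, every pixel -> superpixel 0
    return pspDist3d_cuda_forward(pFeat, spFeat, init_spIndx, /*Kl=*/3, /*Kh=*/4, /*Kw=*/4); // B 27 L H W
}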
std::vector<torch::Tensor> pspDist3d_cuda_backward(
const torch::Tensor grad_sqdist, // B 27 L H W
const torch::Tensor pFeat, // B C L H W
const torch::Tensor spFeat, // B C K
const torch::Tensor init_spIndx, // B 1 L H W
const int Kl,
const int Kh,
const int Kw) {
// setup
const auto batch_size = pFeat.size(0);
const auto depth = pFeat.size(1);
const auto length = pFeat.size(2);
const auto height = pFeat.size(3);
const auto width = pFeat.size(4);
const int K = Kl * Kh * Kw;
auto grad_pFeat = torch::zeros_like(pFeat).set_requires_grad(false);
auto grad_spFeat = torch::zeros_like(spFeat).set_requires_grad(false);
// launch kernel
const int threads = 1024;
const dim3 blocks((27 * length * height * width + threads - 1) / threads, batch_size);
AT_DISPATCH_FLOATING_TYPES(spFeat.type(), "pspDist3d_backward_cuda", ([&] {
pspDist3d_cuda_backward_kernel<scalar_t><<<blocks, threads>>>(
grad_sqdist.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,size_t>(),
grad_pFeat.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,size_t>(),
grad_spFeat.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
pFeat.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,size_t>(),
spFeat.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
init_spIndx.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,size_t>(),
depth, length, height, width, Kl, Kh, Kw, K);
}));
return {grad_pFeat, grad_spFeat};
} |
d76b27a13b5619cb9fc342e9147ee72a39934814.hip | // !!! This is a file automatically generated by hipify!!!
/**
*/
#include "../util/cudaUtil.h"
#define CUDA
#include "cudaKernel.h"
#undef CUDA
/**
* cudaLaunchKernel function
*
* \brief This function uses the specified parameters to execute the
* coprocessor function specified by kernel. Any parameters to
* the kernel should be included in params.
*
 * \param krn A string naming the kernel to execute
 * \param dims The dimensions of the data to execute the kernel on
 * \param nParams The number of parameters to be passed to the kernel
 * \param paramSizes Array giving the size in bytes of each kernel parameter
 * \param params Array of parameters to be passed to the kernel
 * \param gDim The grid dimensions of the kernel
 * \param bDim The block dimensions of the kernel
 * \param locMem The amount of local or shared memory to allocate for kernel
 * execution
 * \param info Task info naming the target device and the process/stream index to
 * associate this kernel's execution and data with
* \return None
*
*/
extern "C" void cudaLaunchKernel( const char * krn, int * dims, int nParams, int * paramSizes, void ** params,
int * gDim, int * bDim, int locMem, HTaskInfo * info) {
hTaskLoc loc = info->location;
int dev = info->device;
int proc = info->process;
size_t offset = 0;
dim3 gridDims = dim3(gDim[HLENGTH],gDim[HWIDTH],gDim[HDEPTH]);
dim3 blockDims = dim3(bDim[HLENGTH],bDim[HWIDTH],bDim[HDEPTH]);
// Set the device and configure the kernel call to a stream
cudaCheckErr(hipSetDevice(dev),__LINE__,__FILE__);
cudaCheckErr(hipConfigureCall(gridDims, blockDims, locMem, (hipStream_t)proc),__LINE__,__FILE__);
   // NOTE: the next three lines override the caller-supplied first kernel argument with a
   // freshly allocated 2-float device buffer (original comment: "turn into internal function").
   float *s;
   hipMalloc(&s,sizeof(float)*2);
   params[0]=&s;
   // For each kernel parameter passed in, push the kernel argument onto the stack
for (int i = 0; i < nParams; i++) {
cudaCheckErr(hipSetupArgument(params[i], paramSizes[i], offset),__LINE__,__FILE__);
offset = offset + paramSizes[i];
}
/*
if (!strcmp(krn,"scale")) {
printf("Launching %s on dev:%d\n", krn, dev);
cudaCheckErr(hipLaunch(scale),__LINE__,__FILE__);
}*/
cudaCheckErr(hipGetLastError(),__LINE__,__FILE__);
if ((proc != 0) && (proc != -1)) { hipStreamSynchronize((hipStream_t)proc); }
//hipDeviceSynchronize();
cudaCheckErr(hipDeviceSynchronize(),__LINE__,__FILE__);
printf("proc= %d run to here\n", proc);
cudaCheckErr(hipGetLastError(),__LINE__,__FILE__);
return;
}
| d76b27a13b5619cb9fc342e9147ee72a39934814.cu | /**
*/
#include "../util/cudaUtil.h"
#define CUDA
#include "cudaKernel.h"
#undef CUDA
/**
* cudaLaunchKernel function
*
* \brief This function uses the specified parameters to execute the
* coprocessor function specified by kernel. Any parameters to
* the kernel should be included in params.
*
 * \param krn A string naming the kernel to execute
 * \param dims The dimensions of the data to execute the kernel on
 * \param nParams The number of parameters to be passed to the kernel
 * \param paramSizes Array giving the size in bytes of each kernel parameter
 * \param params Array of parameters to be passed to the kernel
 * \param gDim The grid dimensions of the kernel
 * \param bDim The block dimensions of the kernel
 * \param locMem The amount of local or shared memory to allocate for kernel
 * execution
 * \param info Task info naming the target device and the process/stream index to
 * associate this kernel's execution and data with
* \return None
*
*/
extern "C" void cudaLaunchKernel( const char * krn, int * dims, int nParams, int * paramSizes, void ** params,
int * gDim, int * bDim, int locMem, HTaskInfo * info) {
hTaskLoc loc = info->location;
int dev = info->device;
int proc = info->process;
size_t offset = 0;
dim3 gridDims = dim3(gDim[HLENGTH],gDim[HWIDTH],gDim[HDEPTH]);
dim3 blockDims = dim3(bDim[HLENGTH],bDim[HWIDTH],bDim[HDEPTH]);
// Set the device and configure the kernel call to a stream
cudaCheckErr(cudaSetDevice(dev),__LINE__,__FILE__);
cudaCheckErr(cudaConfigureCall(gridDims, blockDims, locMem, (cudaStream_t)proc),__LINE__,__FILE__);
   // NOTE: the next three lines override the caller-supplied first kernel argument with a
   // freshly allocated 2-float device buffer (original comment: "turn into internal function").
   float *s;
   cudaMalloc(&s,sizeof(float)*2);
   params[0]=&s;
   // For each kernel parameter passed in, push the kernel argument onto the stack
for (int i = 0; i < nParams; i++) {
cudaCheckErr(cudaSetupArgument(params[i], paramSizes[i], offset),__LINE__,__FILE__);
offset = offset + paramSizes[i];
}
/*
if (!strcmp(krn,"scale")) {
printf("Launching %s on dev:%d\n", krn, dev);
cudaCheckErr(cudaLaunch(scale),__LINE__,__FILE__);
}*/
cudaCheckErr(cudaGetLastError(),__LINE__,__FILE__);
if ((proc != 0) && (proc != -1)) { cudaStreamSynchronize((cudaStream_t)proc); }
//cudaThreadSynchronize();
cudaCheckErr(cudaDeviceSynchronize(),__LINE__,__FILE__);
printf("proc= %d run to here\n", proc);
cudaCheckErr(cudaGetLastError(),__LINE__,__FILE__);
return;
}
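/*
 * Illustrative host-side sketch (editorial addition, not part of the original source): one
 * plausible way to populate the argument arrays before calling this wrapper. The kernel name
 * "scale" (referenced in the commented-out launch above), the buffer, the launch geometry and
 * the HTaskInfo field values are assumptions for illustration only.
 *
 *   float *d_data;                                  // assumed: device buffer of n floats
 *   int n = 1024;
 *   int dims[3] = { n, 1, 1 };
 *   int gDim[3]; gDim[HLENGTH] = (n + 255) / 256; gDim[HWIDTH] = 1; gDim[HDEPTH] = 1;
 *   int bDim[3]; bDim[HLENGTH] = 256;             bDim[HWIDTH] = 1; bDim[HDEPTH] = 1;
 *   void *params[2] = { &d_data, &n };
 *   int paramSizes[2] = { sizeof(float*), sizeof(int) };
 *   HTaskInfo info; info.device = 0; info.process = 0;   // device 0, default stream
 *   cudaLaunchKernel("scale", dims, 2, paramSizes, params, gDim, bDim, 0, &info);
 */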
|
83594d896fcf5f84b46e12f2365aa9b8c3bfe21f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "convolutions.cuh"
__global__
void convolution_deep_sh(const float* in, const float* kernel, float* out,
const int3 im_shape, const int3 k_shape)
{
size_t x = blockIdx.x * blockDim.x + threadIdx.x;
size_t y = blockIdx.y * blockDim.y + threadIdx.y;
size_t z = blockIdx.z * blockDim.z + threadIdx.z;
extern __shared__ float skernel[];
size_t lidx = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
if ( lidx < k_shape.z )
{
skernel[lidx] = kernel[lidx];
}
__syncthreads();
if ( x >= im_shape.x || y >= im_shape.y || z > im_shape.z - k_shape.z )
return;
float sum = 0;
size_t k;
size_t pz;
for ( k = 0; k < k_shape.z; k++ )
{
pz = z + k;
sum += in[pz * im_shape.y * im_shape.x + y * im_shape.x + x] * skernel[k];
}
out[(z+k_shape.z/2) * im_shape.y * im_shape.x + y * im_shape.x + x] = sum;
}
__global__
void convolution_rows_sh(const float* in, const float* kernel, float* out,
const int3 im_shape, const int3 k_shape)
{
size_t x = blockIdx.x * blockDim.x + threadIdx.x;
size_t y = blockIdx.y * blockDim.y + threadIdx.y;
size_t z = blockIdx.z * blockDim.z + threadIdx.z;
extern __shared__ float skernel[];
size_t lidx = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
if ( lidx < k_shape.y )
{
skernel[lidx] = kernel[lidx];
}
__syncthreads();
if ( x >= im_shape.x || y > im_shape.y - k_shape.y || z >= im_shape.z )
return;
float sum = 0;
size_t i;
size_t py;
for ( i = 0; i < k_shape.y; i++ )
{
py = y + i;
sum += in[z * im_shape.y * im_shape.x + py * im_shape.x + x] * skernel[i];
}
out[z * im_shape.y * im_shape.x + (y+k_shape.y/2) * im_shape.x + x] = sum;
}
__global__
void convolution_cols_sh(const float* in, const float* kernel, float* out,
const int3 im_shape, const int3 k_shape)
{
size_t x = blockIdx.x * blockDim.x + threadIdx.x;
size_t y = blockIdx.y * blockDim.y + threadIdx.y;
size_t z = blockIdx.z * blockDim.z + threadIdx.z;
extern __shared__ float skernel[];
size_t lidx = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
if ( lidx < k_shape.x )
{
skernel[lidx] = kernel[lidx];
}
__syncthreads();
if ( x > im_shape.x - k_shape.x || y >= im_shape.y || z >= im_shape.z )
return;
float sum = 0;
size_t j;
size_t px;
for ( j = 0; j < k_shape.x; j++ )
{
px = x + j;
sum += in[z * im_shape.y * im_shape.x + y * im_shape.x + px] * skernel[j];
}
out[z * im_shape.y * im_shape.x + y * im_shape.x + (x+k_shape.x/2)] = sum;
}
__global__
void clamp_result_sh(const float* in, float* out,
const int3 im_shape, const int3 k_shape, const int3 r_shape)
{
size_t x = blockIdx.x * blockDim.x + threadIdx.x;
size_t y = blockIdx.y * blockDim.y + threadIdx.y;
size_t z = blockIdx.z * blockDim.z + threadIdx.z;
if ( x >= r_shape.x || y >= r_shape.y || z >= r_shape.z )
return;
size_t pz = z + k_shape.z/2;
size_t py = y + k_shape.y/2;
size_t px = x + k_shape.x/2;
float result = in[pz * im_shape.y * im_shape.x + py * im_shape.x + px];
out[z * r_shape.y * r_shape.x + y * r_shape.x + x] = result;
}
__global__
void add_result_sh(const float* in, float* out,
const int3 im_shape, const int3 k_shape, const int3 r_shape)
{
size_t x = blockIdx.x * blockDim.x + threadIdx.x;
size_t y = blockIdx.y * blockDim.y + threadIdx.y;
size_t z = blockIdx.z * blockDim.z + threadIdx.z;
if ( x >= r_shape.x || y >= r_shape.y || z >= r_shape.z )
return;
size_t pz = z + k_shape.z/2;
size_t py = y + k_shape.y/2;
size_t px = x + k_shape.x/2;
float result = in[pz * im_shape.y * im_shape.x + py * im_shape.x + px];
out[z * r_shape.y * r_shape.x + y * r_shape.x + x] += result;
}
// Main function
void convolution_separable_shared(const float *h_src, const float *h_kernelz,
const float *h_kernely, const float *h_kernelx,
float *h_dest, const int3 ishape, const int3 kshape,
int gpu)
{
// Init cuda memory
int max_threads = initCuda(gpu);
int max_threads_dim = (int)(floor(pow(max_threads, 1./3.)));
int3 total;
total.x = ishape.x - kshape.x + 1;
total.y = ishape.y - kshape.y + 1;
total.z = ishape.z - kshape.z + 1;
float *d_src1, *d_src2, *d_kernelz, *d_kernely, *d_kernelx, *d_dest;
size_t d_mem_size = ishape.z * ishape.y * ishape.x * sizeof(float);
size_t r_mem_size = total.z * total.y * total.x * sizeof(float);
int3 k_mem_size;
k_mem_size.z = kshape.z * sizeof(float);
k_mem_size.y = kshape.y * sizeof(float);
k_mem_size.x = kshape.x * sizeof(float);
hipMalloc((float **) &d_src1, d_mem_size);
hipMemcpy(d_src1, h_src, d_mem_size, hipMemcpyHostToDevice);
hipMalloc((float **) &d_src2, d_mem_size);
hipMemcpy(d_src2, d_src1, d_mem_size, hipMemcpyDeviceToDevice);
hipMalloc((float **) &d_kernelz, k_mem_size.z);
hipMemcpy(d_kernelz, h_kernelz, k_mem_size.z, hipMemcpyHostToDevice);
hipMalloc((float **) &d_kernely, k_mem_size.y);
hipMemcpy(d_kernely, h_kernely, k_mem_size.y, hipMemcpyHostToDevice);
hipMalloc((float **) &d_kernelx, k_mem_size.x);
hipMemcpy(d_kernelx, h_kernelx, k_mem_size.x, hipMemcpyHostToDevice);
hipMalloc((float **) &d_dest, r_mem_size);
cudaCheckErrors("SRC & KERNEL & DST");
// bdim and gdim
dim3 threads;
threads.x = total.x < max_threads_dim? total.x : max_threads_dim;
threads.y = total.y < max_threads_dim? total.y : max_threads_dim;
threads.z = total.z < max_threads_dim? total.z : max_threads_dim;
dim3 gridz((ishape.x + threads.x - 1) / threads.x, \
(ishape.y + threads.y - 1) / threads.y, \
(total.z + threads.z - 1) / threads.z);
dim3 gridy((ishape.x + threads.x - 1) / threads.x, \
(total.y + threads.y - 1) / threads.y, \
(ishape.z + threads.z - 1) / threads.z);
dim3 gridx((total.x + threads.x - 1) / threads.x, \
(ishape.y + threads.y - 1) / threads.y, \
(ishape.z + threads.z - 1) / threads.z);
dim3 grida((total.x + threads.x - 1) / threads.x, \
(total.y + threads.y - 1) / threads.y, \
(total.z + threads.z - 1) / threads.z);
hipLaunchKernelGGL(( convolution_deep_sh), dim3(gridz), dim3(threads), k_mem_size.z, 0, d_src1, d_kernelz, d_src2, ishape, kshape);
hipLaunchKernelGGL(( convolution_rows_sh), dim3(gridy), dim3(threads), k_mem_size.y, 0, d_src2, d_kernely, d_src1, ishape, kshape);
hipLaunchKernelGGL(( convolution_cols_sh), dim3(gridx), dim3(threads), k_mem_size.x, 0, d_src1, d_kernelx, d_src2, ishape, kshape);
cudaCheckErrors("Convolution");
hipLaunchKernelGGL(( clamp_result_sh), dim3(gridx), dim3(threads), 0, 0, d_src2, d_dest, ishape, kshape, total);
hipMemcpy(h_dest, d_dest, r_mem_size, hipMemcpyDeviceToHost);
cudaCheckErrors("Memcpy back");
hipFree(d_src1);
hipFree(d_src2);
hipFree(d_kernelz);
hipFree(d_kernely);
hipFree(d_kernelx);
hipFree(d_dest);
cudaCheckErrors("Free everything");
hipDeviceReset();
}
void n_convolution_separable_shared(const float *h_src, const float *h_kernels, float *h_dest,
const int3 ishape, const int3 kshape, const int n_kernels,
int gpu)
{
// Init cuda memory
int max_threads = initCuda(gpu);
int max_threads_dim = (int)(floor(pow(max_threads, 1./3.)));
int3 total;
total.x = ishape.x - kshape.x + 1;
total.y = ishape.y - kshape.y + 1;
total.z = ishape.z - kshape.z + 1;
float *d_im, *d_src1, *d_src2, *d_kernelz, *d_kernely, *d_kernelx, *d_dest;
size_t d_mem_size = ishape.z * ishape.y * ishape.x * sizeof(float);
size_t r_mem_size = total.z * total.y * total.x * sizeof(float);
int3 k_mem_size;
k_mem_size.z = kshape.z * sizeof(float);
k_mem_size.y = kshape.y * sizeof(float);
k_mem_size.x = kshape.x * sizeof(float);
hipMalloc((float **) &d_im, d_mem_size);
hipMemcpy(d_im, h_src, d_mem_size, hipMemcpyHostToDevice);
hipMalloc((float **) &d_src1, d_mem_size);
hipMemcpy(d_src1, d_im, d_mem_size, hipMemcpyDeviceToDevice);
hipMalloc((float **) &d_src2, d_mem_size);
hipMemcpy(d_src2, d_src1, d_mem_size, hipMemcpyDeviceToDevice);
hipMalloc((float **) &d_kernelz, k_mem_size.z);
hipMalloc((float **) &d_kernely, k_mem_size.y);
hipMalloc((float **) &d_kernelx, k_mem_size.x);
hipMalloc((float **) &d_dest, r_mem_size);
hipMemset(d_dest, 0, r_mem_size);
cudaCheckErrors("SRC & KERNEL & DST");
// bdim and gdim
dim3 threads;
threads.x = total.x < max_threads_dim? total.x : max_threads_dim;
threads.y = total.y < max_threads_dim? total.y : max_threads_dim;
threads.z = total.z < max_threads_dim? total.z : max_threads_dim;
dim3 gridz((ishape.x + threads.x - 1) / threads.x, \
(ishape.y + threads.y - 1) / threads.y, \
(total.z + threads.z - 1) / threads.z);
dim3 gridy((ishape.x + threads.x - 1) / threads.x, \
(total.y + threads.y - 1) / threads.y, \
(ishape.z + threads.z - 1) / threads.z);
dim3 gridx((total.x + threads.x - 1) / threads.x, \
(ishape.y + threads.y - 1) / threads.y, \
(ishape.z + threads.z - 1) / threads.z);
dim3 grida((total.x + threads.x - 1) / threads.x, \
(total.y + threads.y - 1) / threads.y, \
(total.z + threads.z - 1) / threads.z);
for ( int i = 0; i < n_kernels; i++ )
{
hipMemcpy(d_kernelz, h_kernels, k_mem_size.z, hipMemcpyHostToDevice);
h_kernels += kshape.z;
hipMemcpy(d_kernely, h_kernels, k_mem_size.y, hipMemcpyHostToDevice);
h_kernels += kshape.y;
hipMemcpy(d_kernelx, h_kernels, k_mem_size.x, hipMemcpyHostToDevice);
h_kernels += kshape.x;
hipLaunchKernelGGL(( convolution_deep_sh), dim3(gridz), dim3(threads), k_mem_size.z, 0, d_im, d_kernelz, d_src1, ishape, kshape);
hipLaunchKernelGGL(( convolution_rows_sh), dim3(gridy), dim3(threads), k_mem_size.y, 0, d_src1, d_kernely, d_src2, ishape, kshape);
hipLaunchKernelGGL(( convolution_cols_sh), dim3(gridx), dim3(threads), k_mem_size.x, 0, d_src2, d_kernelx, d_src1, ishape, kshape);
cudaCheckErrors("Convolution");
hipLaunchKernelGGL(( add_result_sh), dim3(gridx), dim3(threads), 0, 0, d_src1, d_dest, ishape, kshape, total);
}
hipMemcpy(h_dest, d_dest, r_mem_size, hipMemcpyDeviceToHost);
cudaCheckErrors("Memcpy back");
hipFree(d_im);
hipFree(d_src1);
hipFree(d_src2);
hipFree(d_kernelz);
hipFree(d_kernely);
hipFree(d_kernelx);
hipFree(d_dest);
cudaCheckErrors("Free everything");
hipDeviceReset();
}
| 83594d896fcf5f84b46e12f2365aa9b8c3bfe21f.cu |
#include "convolutions.cuh"
__global__
void convolution_deep_sh(const float* in, const float* kernel, float* out,
const int3 im_shape, const int3 k_shape)
{
size_t x = blockIdx.x * blockDim.x + threadIdx.x;
size_t y = blockIdx.y * blockDim.y + threadIdx.y;
size_t z = blockIdx.z * blockDim.z + threadIdx.z;
extern __shared__ float skernel[];
size_t lidx = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
if ( lidx < k_shape.z )
{
skernel[lidx] = kernel[lidx];
}
__syncthreads();
if ( x >= im_shape.x || y >= im_shape.y || z > im_shape.z - k_shape.z )
return;
float sum = 0;
size_t k;
size_t pz;
for ( k = 0; k < k_shape.z; k++ )
{
pz = z + k;
sum += in[pz * im_shape.y * im_shape.x + y * im_shape.x + x] * skernel[k];
}
out[(z+k_shape.z/2) * im_shape.y * im_shape.x + y * im_shape.x + x] = sum;
}
__global__
void convolution_rows_sh(const float* in, const float* kernel, float* out,
const int3 im_shape, const int3 k_shape)
{
size_t x = blockIdx.x * blockDim.x + threadIdx.x;
size_t y = blockIdx.y * blockDim.y + threadIdx.y;
size_t z = blockIdx.z * blockDim.z + threadIdx.z;
extern __shared__ float skernel[];
size_t lidx = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
if ( lidx < k_shape.y )
{
skernel[lidx] = kernel[lidx];
}
__syncthreads();
if ( x >= im_shape.x || y > im_shape.y - k_shape.y || z >= im_shape.z )
return;
float sum = 0;
size_t i;
size_t py;
for ( i = 0; i < k_shape.y; i++ )
{
py = y + i;
sum += in[z * im_shape.y * im_shape.x + py * im_shape.x + x] * skernel[i];
}
out[z * im_shape.y * im_shape.x + (y+k_shape.y/2) * im_shape.x + x] = sum;
}
__global__
void convolution_cols_sh(const float* in, const float* kernel, float* out,
const int3 im_shape, const int3 k_shape)
{
size_t x = blockIdx.x * blockDim.x + threadIdx.x;
size_t y = blockIdx.y * blockDim.y + threadIdx.y;
size_t z = blockIdx.z * blockDim.z + threadIdx.z;
extern __shared__ float skernel[];
size_t lidx = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
if ( lidx < k_shape.x )
{
skernel[lidx] = kernel[lidx];
}
__syncthreads();
if ( x > im_shape.x - k_shape.x || y >= im_shape.y || z >= im_shape.z )
return;
float sum = 0;
size_t j;
size_t px;
for ( j = 0; j < k_shape.x; j++ )
{
px = x + j;
sum += in[z * im_shape.y * im_shape.x + y * im_shape.x + px] * skernel[j];
}
out[z * im_shape.y * im_shape.x + y * im_shape.x + (x+k_shape.x/2)] = sum;
}
__global__
void clamp_result_sh(const float* in, float* out,
const int3 im_shape, const int3 k_shape, const int3 r_shape)
{
size_t x = blockIdx.x * blockDim.x + threadIdx.x;
size_t y = blockIdx.y * blockDim.y + threadIdx.y;
size_t z = blockIdx.z * blockDim.z + threadIdx.z;
if ( x >= r_shape.x || y >= r_shape.y || z >= r_shape.z )
return;
size_t pz = z + k_shape.z/2;
size_t py = y + k_shape.y/2;
size_t px = x + k_shape.x/2;
float result = in[pz * im_shape.y * im_shape.x + py * im_shape.x + px];
out[z * r_shape.y * r_shape.x + y * r_shape.x + x] = result;
}
__global__
void add_result_sh(const float* in, float* out,
const int3 im_shape, const int3 k_shape, const int3 r_shape)
{
size_t x = blockIdx.x * blockDim.x + threadIdx.x;
size_t y = blockIdx.y * blockDim.y + threadIdx.y;
size_t z = blockIdx.z * blockDim.z + threadIdx.z;
if ( x >= r_shape.x || y >= r_shape.y || z >= r_shape.z )
return;
size_t pz = z + k_shape.z/2;
size_t py = y + k_shape.y/2;
size_t px = x + k_shape.x/2;
float result = in[pz * im_shape.y * im_shape.x + py * im_shape.x + px];
out[z * r_shape.y * r_shape.x + y * r_shape.x + x] += result;
}
// Main function
void convolution_separable_shared(const float *h_src, const float *h_kernelz,
const float *h_kernely, const float *h_kernelx,
float *h_dest, const int3 ishape, const int3 kshape,
int gpu)
{
// Init cuda memory
int max_threads = initCuda(gpu);
int max_threads_dim = (int)(floor(pow(max_threads, 1./3.)));
int3 total;
total.x = ishape.x - kshape.x + 1;
total.y = ishape.y - kshape.y + 1;
total.z = ishape.z - kshape.z + 1;
float *d_src1, *d_src2, *d_kernelz, *d_kernely, *d_kernelx, *d_dest;
size_t d_mem_size = ishape.z * ishape.y * ishape.x * sizeof(float);
size_t r_mem_size = total.z * total.y * total.x * sizeof(float);
int3 k_mem_size;
k_mem_size.z = kshape.z * sizeof(float);
k_mem_size.y = kshape.y * sizeof(float);
k_mem_size.x = kshape.x * sizeof(float);
cudaMalloc((float **) &d_src1, d_mem_size);
cudaMemcpy(d_src1, h_src, d_mem_size, cudaMemcpyHostToDevice);
cudaMalloc((float **) &d_src2, d_mem_size);
cudaMemcpy(d_src2, d_src1, d_mem_size, cudaMemcpyDeviceToDevice);
cudaMalloc((float **) &d_kernelz, k_mem_size.z);
cudaMemcpy(d_kernelz, h_kernelz, k_mem_size.z, cudaMemcpyHostToDevice);
cudaMalloc((float **) &d_kernely, k_mem_size.y);
cudaMemcpy(d_kernely, h_kernely, k_mem_size.y, cudaMemcpyHostToDevice);
cudaMalloc((float **) &d_kernelx, k_mem_size.x);
cudaMemcpy(d_kernelx, h_kernelx, k_mem_size.x, cudaMemcpyHostToDevice);
cudaMalloc((float **) &d_dest, r_mem_size);
cudaCheckErrors("SRC & KERNEL & DST");
// bdim and gdim
dim3 threads;
threads.x = total.x < max_threads_dim? total.x : max_threads_dim;
threads.y = total.y < max_threads_dim? total.y : max_threads_dim;
threads.z = total.z < max_threads_dim? total.z : max_threads_dim;
dim3 gridz((ishape.x + threads.x - 1) / threads.x, \
(ishape.y + threads.y - 1) / threads.y, \
(total.z + threads.z - 1) / threads.z);
dim3 gridy((ishape.x + threads.x - 1) / threads.x, \
(total.y + threads.y - 1) / threads.y, \
(ishape.z + threads.z - 1) / threads.z);
dim3 gridx((total.x + threads.x - 1) / threads.x, \
(ishape.y + threads.y - 1) / threads.y, \
(ishape.z + threads.z - 1) / threads.z);
dim3 grida((total.x + threads.x - 1) / threads.x, \
(total.y + threads.y - 1) / threads.y, \
(total.z + threads.z - 1) / threads.z);
convolution_deep_sh<<<gridz, threads, k_mem_size.z>>>(d_src1, d_kernelz, d_src2, ishape, kshape);
convolution_rows_sh<<<gridy, threads, k_mem_size.y>>>(d_src2, d_kernely, d_src1, ishape, kshape);
convolution_cols_sh<<<gridx, threads, k_mem_size.x>>>(d_src1, d_kernelx, d_src2, ishape, kshape);
cudaCheckErrors("Convolution");
clamp_result_sh<<<gridx, threads>>>(d_src2, d_dest, ishape, kshape, total);
cudaMemcpy(h_dest, d_dest, r_mem_size, cudaMemcpyDeviceToHost);
cudaCheckErrors("Memcpy back");
cudaFree(d_src1);
cudaFree(d_src2);
cudaFree(d_kernelz);
cudaFree(d_kernely);
cudaFree(d_kernelx);
cudaFree(d_dest);
cudaCheckErrors("Free everything");
cudaDeviceReset();
}
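// Illustrative usage sketch (editorial addition, not part of the original source): a 5-tap box
// blur pushed through the separable path above. The helper name, volume size, kernel values and
// heap allocation are assumptions for illustration only; int3 and the checking helpers come from
// the same headers this file already uses.
static void convolution_separable_shared_example() {
    const int3 ishape = { 64, 64, 64 };                 // x, y, z extents of the input volume
    const int3 kshape = { 5, 5, 5 };                    // 1-D kernel length applied on each axis
    float *src = new float[64 * 64 * 64];
    float *dst = new float[60 * 60 * 60];               // (ishape - kshape + 1) per axis
    float k[5] = { 0.2f, 0.2f, 0.2f, 0.2f, 0.2f };      // simple box kernel, reused for z, y, x
    for (int i = 0; i < 64 * 64 * 64; ++i) src[i] = 1.0f;
    convolution_separable_shared(src, k, k, k, dst, ishape, kshape, /*gpu=*/0);
    delete[] src;
    delete[] dst;
}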
void n_convolution_separable_shared(const float *h_src, const float *h_kernels, float *h_dest,
const int3 ishape, const int3 kshape, const int n_kernels,
int gpu)
{
// Init cuda memory
int max_threads = initCuda(gpu);
int max_threads_dim = (int)(floor(pow(max_threads, 1./3.)));
int3 total;
total.x = ishape.x - kshape.x + 1;
total.y = ishape.y - kshape.y + 1;
total.z = ishape.z - kshape.z + 1;
float *d_im, *d_src1, *d_src2, *d_kernelz, *d_kernely, *d_kernelx, *d_dest;
size_t d_mem_size = ishape.z * ishape.y * ishape.x * sizeof(float);
size_t r_mem_size = total.z * total.y * total.x * sizeof(float);
int3 k_mem_size;
k_mem_size.z = kshape.z * sizeof(float);
k_mem_size.y = kshape.y * sizeof(float);
k_mem_size.x = kshape.x * sizeof(float);
cudaMalloc((float **) &d_im, d_mem_size);
cudaMemcpy(d_im, h_src, d_mem_size, cudaMemcpyHostToDevice);
cudaMalloc((float **) &d_src1, d_mem_size);
cudaMemcpy(d_src1, d_im, d_mem_size, cudaMemcpyDeviceToDevice);
cudaMalloc((float **) &d_src2, d_mem_size);
cudaMemcpy(d_src2, d_src1, d_mem_size, cudaMemcpyDeviceToDevice);
cudaMalloc((float **) &d_kernelz, k_mem_size.z);
cudaMalloc((float **) &d_kernely, k_mem_size.y);
cudaMalloc((float **) &d_kernelx, k_mem_size.x);
cudaMalloc((float **) &d_dest, r_mem_size);
cudaMemset(d_dest, 0, r_mem_size);
cudaCheckErrors("SRC & KERNEL & DST");
// bdim and gdim
dim3 threads;
threads.x = total.x < max_threads_dim? total.x : max_threads_dim;
threads.y = total.y < max_threads_dim? total.y : max_threads_dim;
threads.z = total.z < max_threads_dim? total.z : max_threads_dim;
dim3 gridz((ishape.x + threads.x - 1) / threads.x, \
(ishape.y + threads.y - 1) / threads.y, \
(total.z + threads.z - 1) / threads.z);
dim3 gridy((ishape.x + threads.x - 1) / threads.x, \
(total.y + threads.y - 1) / threads.y, \
(ishape.z + threads.z - 1) / threads.z);
dim3 gridx((total.x + threads.x - 1) / threads.x, \
(ishape.y + threads.y - 1) / threads.y, \
(ishape.z + threads.z - 1) / threads.z);
dim3 grida((total.x + threads.x - 1) / threads.x, \
(total.y + threads.y - 1) / threads.y, \
(total.z + threads.z - 1) / threads.z);
for ( int i = 0; i < n_kernels; i++ )
{
cudaMemcpy(d_kernelz, h_kernels, k_mem_size.z, cudaMemcpyHostToDevice);
h_kernels += kshape.z;
cudaMemcpy(d_kernely, h_kernels, k_mem_size.y, cudaMemcpyHostToDevice);
h_kernels += kshape.y;
cudaMemcpy(d_kernelx, h_kernels, k_mem_size.x, cudaMemcpyHostToDevice);
h_kernels += kshape.x;
convolution_deep_sh<<<gridz, threads, k_mem_size.z>>>(d_im, d_kernelz, d_src1, ishape, kshape);
convolution_rows_sh<<<gridy, threads, k_mem_size.y>>>(d_src1, d_kernely, d_src2, ishape, kshape);
convolution_cols_sh<<<gridx, threads, k_mem_size.x>>>(d_src2, d_kernelx, d_src1, ishape, kshape);
cudaCheckErrors("Convolution");
add_result_sh<<<gridx, threads>>>(d_src1, d_dest, ishape, kshape, total);
}
cudaMemcpy(h_dest, d_dest, r_mem_size, cudaMemcpyDeviceToHost);
cudaCheckErrors("Memcpy back");
cudaFree(d_im);
cudaFree(d_src1);
cudaFree(d_src2);
cudaFree(d_kernelz);
cudaFree(d_kernely);
cudaFree(d_kernelx);
cudaFree(d_dest);
cudaCheckErrors("Free everything");
cudaDeviceReset();
}
|
09e8978e7542069dc1c0939e3306bc7a75d8eca0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/relu_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "runtime/device/gpu/cuda_common.h"
template <typename T>
__global__ void CalReLUKernel(int size, T *input_addr, T *output_addr) {
for (int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
output_addr[pos] = input_addr[pos] > static_cast<T>(0) ? input_addr[pos] : static_cast<T>(0);
}
}
template <typename T>
void CalReLU(int size, T *input_addr, T *output_addr, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( CalReLUKernel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, input_addr, output_addr);
return;
}
template void CalReLU(int size, double *input_addr, double *output_addr, hipStream_t cuda_stream);
template void CalReLU(int size, float *input_addr, float *output_addr, hipStream_t cuda_stream);
template void CalReLU(int size, half *input_addr, half *output_addr, hipStream_t cuda_stream);
template void CalReLU(int size, int8_t *input_addr, int8_t *output_addr, hipStream_t cuda_stream);
template void CalReLU(int size, int16_t *input_addr, int16_t *output_addr, hipStream_t cuda_stream);
template void CalReLU(int size, int32_t *input_addr, int32_t *output_addr, hipStream_t cuda_stream);
template void CalReLU(int size, int64_t *input_addr, int64_t *output_addr, hipStream_t cuda_stream);
template void CalReLU(int size, uint8_t *input_addr, uint8_t *output_addr, hipStream_t cuda_stream);
template <typename T>
__global__ void ReluV2Kernel(const size_t num, const T *x, T *y, uint32_t *mask) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < num; i += blockDim.x * gridDim.x) {
T v = x[i];
bool p = v > static_cast<T>(0);
y[i] = p ? v : static_cast<T>(0);
auto warp_predict = BallotSync(p, __activemask());
if (LaneId() == 0) {
mask[WarpId(i)] = warp_predict;
}
}
}
template <typename T>
void ReluV2(const size_t num, const T *x, T *y, uint32_t *mask, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( ReluV2Kernel), dim3(kBlocksPerGrid(num)), dim3(kThreadsPerBlock), 0, cuda_stream, num, x, y, mask);
}
template <typename T>
__global__ void ReluGradV2Kernel(const size_t num, const T *dy, const uint32_t *mask, T *dx) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < num; i += blockDim.x * gridDim.x) {
bool p = mask[WarpId(i)] & (1 << LaneId());
dx[i] = p ? dy[i] : static_cast<T>(0);
}
}
template <typename T>
void ReluGradV2(const size_t num, const T *dy, const uint32_t *mask, T *dx, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( ReluGradV2Kernel), dim3(kBlocksPerGrid(num)), dim3(kThreadsPerBlock), 0, cuda_stream, num, dy, mask, dx);
}
template void ReluV2(const size_t num, const double *x, double *y, uint32_t *mask, hipStream_t cuda_stream);
template void ReluV2(const size_t num, const float *x, float *y, uint32_t *mask, hipStream_t cuda_stream);
template void ReluV2(const size_t num, const half *x, half *y, uint32_t *mask, hipStream_t cuda_stream);
template void ReluV2(const size_t num, const int8_t *x, int8_t *y, uint32_t *mask, hipStream_t cuda_stream);
template void ReluV2(const size_t num, const int16_t *x, int16_t *y, uint32_t *mask, hipStream_t cuda_stream);
template void ReluV2(const size_t num, const int32_t *x, int32_t *y, uint32_t *mask, hipStream_t cuda_stream);
template void ReluV2(const size_t num, const int64_t *x, int64_t *y, uint32_t *mask, hipStream_t cuda_stream);
template void ReluV2(const size_t num, const uint8_t *x, uint8_t *y, uint32_t *mask, hipStream_t cuda_stream);
template void ReluGradV2(const size_t num, const double *dy, const uint32_t *mask, double *dx,
hipStream_t cuda_stream);
template void ReluGradV2(const size_t num, const float *dy, const uint32_t *mask, float *dx, hipStream_t cuda_stream);
template void ReluGradV2(const size_t num, const half *dy, const uint32_t *mask, half *dx, hipStream_t cuda_stream);
template void ReluGradV2(const size_t num, const int8_t *dy, const uint32_t *mask, int8_t *dx,
hipStream_t cuda_stream);
template void ReluGradV2(const size_t num, const int16_t *dy, const uint32_t *mask, int16_t *dx,
hipStream_t cuda_stream);
template void ReluGradV2(const size_t num, const int32_t *dy, const uint32_t *mask, int32_t *dx,
hipStream_t cuda_stream);
template void ReluGradV2(const size_t num, const int64_t *dy, const uint32_t *mask, int64_t *dx,
hipStream_t cuda_stream);
template void ReluGradV2(const size_t num, const uint8_t *dy, const uint32_t *mask, uint8_t *dx,
hipStream_t cuda_stream);
| 09e8978e7542069dc1c0939e3306bc7a75d8eca0.cu | /**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/relu_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "runtime/device/gpu/cuda_common.h"
template <typename T>
__global__ void CalReLUKernel(int size, T *input_addr, T *output_addr) {
for (int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
output_addr[pos] = input_addr[pos] > static_cast<T>(0) ? input_addr[pos] : static_cast<T>(0);
}
}
template <typename T>
void CalReLU(int size, T *input_addr, T *output_addr, cudaStream_t cuda_stream) {
CalReLUKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, input_addr, output_addr);
return;
}
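// Illustrative usage sketch (editorial addition, not part of the original source): applying
// CalReLU to a float buffer. The helper name, size, locally created stream and uninitialised
// input are assumptions for illustration only; a real caller would copy data into d_in first.
static void CalReLUExample() {
  const int n = 1 << 20;
  float *d_in = nullptr, *d_out = nullptr;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  cudaStream_t stream;
  cudaStreamCreate(&stream);
  CalReLU(n, d_in, d_out, stream);
  cudaStreamSynchronize(stream);
  cudaStreamDestroy(stream);
  cudaFree(d_in);
  cudaFree(d_out);
}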
template void CalReLU(int size, double *input_addr, double *output_addr, cudaStream_t cuda_stream);
template void CalReLU(int size, float *input_addr, float *output_addr, cudaStream_t cuda_stream);
template void CalReLU(int size, half *input_addr, half *output_addr, cudaStream_t cuda_stream);
template void CalReLU(int size, int8_t *input_addr, int8_t *output_addr, cudaStream_t cuda_stream);
template void CalReLU(int size, int16_t *input_addr, int16_t *output_addr, cudaStream_t cuda_stream);
template void CalReLU(int size, int32_t *input_addr, int32_t *output_addr, cudaStream_t cuda_stream);
template void CalReLU(int size, int64_t *input_addr, int64_t *output_addr, cudaStream_t cuda_stream);
template void CalReLU(int size, uint8_t *input_addr, uint8_t *output_addr, cudaStream_t cuda_stream);
template <typename T>
__global__ void ReluV2Kernel(const size_t num, const T *x, T *y, uint32_t *mask) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < num; i += blockDim.x * gridDim.x) {
T v = x[i];
bool p = v > static_cast<T>(0);
y[i] = p ? v : static_cast<T>(0);
auto warp_predict = BallotSync(p, __activemask());
if (LaneId() == 0) {
mask[WarpId(i)] = warp_predict;
}
}
}
template <typename T>
void ReluV2(const size_t num, const T *x, T *y, uint32_t *mask, cudaStream_t cuda_stream) {
ReluV2Kernel<<<kBlocksPerGrid(num), kThreadsPerBlock, 0, cuda_stream>>>(num, x, y, mask);
}
template <typename T>
__global__ void ReluGradV2Kernel(const size_t num, const T *dy, const uint32_t *mask, T *dx) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < num; i += blockDim.x * gridDim.x) {
bool p = mask[WarpId(i)] & (1 << LaneId());
dx[i] = p ? dy[i] : static_cast<T>(0);
}
}
template <typename T>
void ReluGradV2(const size_t num, const T *dy, const uint32_t *mask, T *dx, cudaStream_t cuda_stream) {
ReluGradV2Kernel<<<kBlocksPerGrid(num), kThreadsPerBlock, 0, cuda_stream>>>(num, dy, mask, dx);
}
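// Editorial note (inferred from the two kernels above, not stated in the original source):
// ReluV2 packs one "input > 0" bit per element into mask, one 32-bit word per warp
// (WarpId(i) selects the word, LaneId() the bit), and ReluGradV2 reads those bits back to
// gate dy, so the same mask buffer must be passed unchanged from the forward to the backward call.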
template void ReluV2(const size_t num, const double *x, double *y, uint32_t *mask, cudaStream_t cuda_stream);
template void ReluV2(const size_t num, const float *x, float *y, uint32_t *mask, cudaStream_t cuda_stream);
template void ReluV2(const size_t num, const half *x, half *y, uint32_t *mask, cudaStream_t cuda_stream);
template void ReluV2(const size_t num, const int8_t *x, int8_t *y, uint32_t *mask, cudaStream_t cuda_stream);
template void ReluV2(const size_t num, const int16_t *x, int16_t *y, uint32_t *mask, cudaStream_t cuda_stream);
template void ReluV2(const size_t num, const int32_t *x, int32_t *y, uint32_t *mask, cudaStream_t cuda_stream);
template void ReluV2(const size_t num, const int64_t *x, int64_t *y, uint32_t *mask, cudaStream_t cuda_stream);
template void ReluV2(const size_t num, const uint8_t *x, uint8_t *y, uint32_t *mask, cudaStream_t cuda_stream);
template void ReluGradV2(const size_t num, const double *dy, const uint32_t *mask, double *dx,
cudaStream_t cuda_stream);
template void ReluGradV2(const size_t num, const float *dy, const uint32_t *mask, float *dx, cudaStream_t cuda_stream);
template void ReluGradV2(const size_t num, const half *dy, const uint32_t *mask, half *dx, cudaStream_t cuda_stream);
template void ReluGradV2(const size_t num, const int8_t *dy, const uint32_t *mask, int8_t *dx,
cudaStream_t cuda_stream);
template void ReluGradV2(const size_t num, const int16_t *dy, const uint32_t *mask, int16_t *dx,
cudaStream_t cuda_stream);
template void ReluGradV2(const size_t num, const int32_t *dy, const uint32_t *mask, int32_t *dx,
cudaStream_t cuda_stream);
template void ReluGradV2(const size_t num, const int64_t *dy, const uint32_t *mask, int64_t *dx,
cudaStream_t cuda_stream);
template void ReluGradV2(const size_t num, const uint8_t *dy, const uint32_t *mask, uint8_t *dx,
cudaStream_t cuda_stream);
|
5969edf1fcfd0eacd4aa07b6ffa68e0a09afe58d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
//
// @author [email protected]
// @author Yurii Shyrma ([email protected])
//
#include <exceptions/cuda_exception.h>
#include <rocblas.h>
#include "../MmulHelper.h"
#include <ops/specials_cuda.h>
#include <helpers/ShapeUtils.h>
#include <helpers/PointersManager.h>
#include <numeric>
namespace sd {
//////////////////////////////////////////////////////////////////////////////
// MxK x KxN = MxN -> actual sequence of axes doesn't matter
template <typename T1, typename T2, typename T3>
static __global__ void usualCudaGemm(const void* vA, const Nd4jLong* aShapeInfo, const void* vB, const Nd4jLong* bShapeInfo, void* vC, const Nd4jLong* cShapeInfo,
const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis, const int cNaxis,
const double alpha, const double beta) {
const T1* A = reinterpret_cast<const T1*>(vA);
const T2* B = reinterpret_cast<const T2*>(vB);
T3* C = reinterpret_cast< T3*>(vC);
__shared__ int K, *coords;
__shared__ bool betaPresent;
__shared__ Nd4jLong cLen, totalThreads;
__shared__ T3 alphaZ, betaZ;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
coords = reinterpret_cast<int*>(shmem);
cLen = shape::length(cShapeInfo);
K = shape::shapeOf(const_cast<Nd4jLong*>(aShapeInfo))[aKaxis];
betaPresent = beta;
totalThreads = gridDim.x * blockDim.x;
alphaZ = alpha;
betaZ = beta;
}
__syncthreads();
auto aCoords = coords + threadIdx.x * 6; // 6 = (aRank + bRank + cRank)
auto bCoords = aCoords + 2;
auto cCoords = bCoords + 2;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < cLen; i += totalThreads) {
// evaluate C coordinates
shape::index2coords(i, cShapeInfo, cCoords);
// evaluate A coordinates
aCoords[aMaxis] = cCoords[cMaxis];
aCoords[aKaxis] = 0;
// evaluate B coordinates
bCoords[bKaxis] = 0;
bCoords[bNaxis] = cCoords[cNaxis];
auto aOffset = shape::getOffset(aShapeInfo, aCoords);
auto bOffset = shape::getOffset(bShapeInfo, bCoords);
T3 val = A[aOffset] * B[bOffset]; // first iteration
for (uint j = 1; j < K; ++j) { // rest iterations
aOffset += shape::stride(aShapeInfo)[aKaxis];
bOffset += shape::stride(bShapeInfo)[bKaxis];
val = val + A[aOffset] * B[bOffset];
}
auto cOffset = shape::getOffset(cShapeInfo, cCoords);
if(betaPresent)
C[cOffset] = alphaZ * val + betaZ * C[cOffset];
else
C[cOffset] = alphaZ * val;
}
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
__host__ static void usualGemm(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, hipStream_t *stream, const void* vA, const Nd4jLong* aShapeInfo, const void* vB, const Nd4jLong* bShapeInfo, void* vC, const Nd4jLong* cShapeInfo, const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis, const int cNaxis, const double alpha, const double beta) {
hipLaunchKernelGGL(( usualCudaGemm<T1,T2,T3>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vA, aShapeInfo, vB, bShapeInfo, vC, cShapeInfo, aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta);
}
////////////////////////////////////////////////////////////////////////
// MxN x N = M -> actual sequence of {M,N} axes doesn't matter
template <typename T1, typename T2, typename T3>
static __global__ void usualCudaGemv(const void* vA, const Nd4jLong* aShapeInfo, const void* vX, const Nd4jLong* xShapeInfo, void* vY, const Nd4jLong* yShapeInfo,
const int incx, const int incy, const int aMaxis, const double alpha, const double beta) {
const T1* A = reinterpret_cast<const T1*>(vA);
const T2* X = reinterpret_cast<const T2*>(vX);
T3* Y = reinterpret_cast< T3*>(vY);
__shared__ int M, N;
__shared__ bool betaPresent;
__shared__ Nd4jLong cLen, totalThreads, aNstride, aMstride;
__shared__ T3 alphaZ, betaZ;
if (threadIdx.x == 0) {
N = shape::length(xShapeInfo);
M = shape::length(yShapeInfo);
aMstride = shape::stride(aShapeInfo)[aMaxis];
aNstride = shape::stride(aShapeInfo)[aMaxis == 0 ? 1 : 0];
totalThreads = gridDim.x * blockDim.x;
betaPresent = beta;
alphaZ = alpha;
betaZ = beta;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < M; i += totalThreads) {
// evaluate offsets
auto aOffset = i * aMstride;
auto xOffset = 0;
T3 val = A[aOffset] * X[xOffset]; // first iteration
for (uint j = 1; j < N; ++j) { // rest iterations
aOffset += aNstride;
xOffset += incx;
val = val + A[aOffset] * X[xOffset];
}
auto yOffset = i * incy;
if(betaPresent)
Y[yOffset] = alphaZ * val + betaZ * Y[yOffset];
else
Y[yOffset] = alphaZ * val;
}
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
__host__ static void usualGemv(const int blocksPerGrid, const int threadsPerBlock, hipStream_t *stream, const void* vA, const Nd4jLong* aShapeInfo, const void* vX, const Nd4jLong* xShapeInfo, void* vY, const Nd4jLong* yShapeInfo, const int incx, const int incy, const int aMaxis, const double alpha, const double beta) {
hipLaunchKernelGGL(( usualCudaGemv<T1,T2,T3>), dim3(blocksPerGrid), dim3(threadsPerBlock), 512, *stream, vA, aShapeInfo, vX, xShapeInfo, vY, yShapeInfo, incx, incy, aMaxis, alpha, beta);
}
//////////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
static __global__ void usualCudaDot(const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ) {
T1* X = reinterpret_cast<T1*>(const_cast<void*>(vX));
T2* Y = reinterpret_cast<T2*>(const_cast<void*>(vY));
T3* Z = reinterpret_cast<T3*>(vZ);
extern __shared__ unsigned char shmem[];
auto pairwiseMul = reinterpret_cast<T3*>(shmem);
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < length)
pairwiseMul[tid] = X[tid * incx] * Y[tid * incy];
__syncthreads();
if(tid == 0) {
T3 sum = 0;
for(Nd4jLong i = 0; i < length; ++i)
sum = sum + pairwiseMul[i];
if(beta)
*Z = (T3)alpha * sum + (T3)beta * *Z;
else
*Z = (T3)alpha * sum;
}
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
__host__ static void usualDot(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ) {
hipLaunchKernelGGL(( usualCudaDot<T1,T2,T3>), dim3(blocksPerGrid), dim3(threadsPerBlock), length*sizeof(T3) + 128, *stream, length, alpha, vX, incx, vY, incy, beta, vZ);
}
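// Editorial note (inferred from usualCudaDot above, not stated in the original source): the
// kernel stores every pairwise product in dynamic shared memory and lets global thread 0 perform
// the final serial sum, so it is only meaningful when a single block covers the whole vector,
// which is how MmulHelper::dot below launches it.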
//////////////////////////////////////////////////////////////////////////////
// MxK x KxN = MxN
NDArray* MmulHelper::mmulMxM(const NDArray* A, const NDArray* B, NDArray* C, double alpha, double beta, const char outOrder) {
if(A->rankOf() != 2)
throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of A array is not equal 2 !");
if(B->rankOf() != 2)
throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of B array is not equal 2 !");
const auto M = A->sizeAt(0);
const auto K = A->sizeAt(1);
const auto N = B->sizeAt(1);
if(C != nullptr && C->rankOf() != 2)
throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of C array is not equal 2 !");
if(B->sizeAt(0) != K)
throw std::runtime_error("MmulHelper::mmulMxM cuda: B array has wrong number of rows !");
if(C != nullptr && C->sizeAt(0) != M)
throw std::runtime_error("MmulHelper::mmulMxM cuda: C array has wrong number of rows !");
if(C != nullptr && C->sizeAt(1) != N)
throw std::runtime_error("MmulHelper::mmulMxM cuda: C array has wrong number of columns !");
if(C == nullptr)
C = new NDArray(outOrder, {M,N}, DataTypeUtils::pickPairwiseResultType(A->dataType(), B->dataType()), A->getContext());
if (C->isEmpty())
return C;
const int major = Environment::getInstance().capabilities()[AffinityManager::currentDeviceId()].first();
const auto aType = A->dataType();
const auto bType = B->dataType();
const auto cType = C->dataType();
const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC);
const bool typeDouble = ABC && aType == DataType::DOUBLE;
const bool typeFloat = ABC && aType == DataType::FLOAT32;
const bool typeHalf = ABC && aType == DataType::HALF && major >= 6;
const bool typeIntFloat = AB && aType == DataType::INT8 && cType == DataType::FLOAT32 && major >= 6;
const bool typeHalfFloat = AB && aType == DataType::HALF && cType == DataType::FLOAT32 && major >= 6;
std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex());
auto handle = reinterpret_cast<hipblasHandle_t *>(A->getContext()->getCublasHandle());
auto stream = A->getContext()->getCudaStream();
auto status = hipblasSetStream(*handle, *stream);
if (status != HIPBLAS_STATUS_SUCCESS)
throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status);
if(!typeDouble && !typeFloat && !typeHalf && !typeIntFloat && !typeHalfFloat) {
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (C->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * 6 + 128; // 6 = aRank + bRank + cRank
NDArray::prepareSpecialUse({C}, {A, B});
// BUILD_TRIPLE_SELECTOR(aType, bType, cType, usualGemm, (blocksPerGrid, threadsPerBlock, sharedMem, stream, A->specialBuffer(), A->specialShapeInfo(), B->specialBuffer(), B->specialShapeInfo(), C->specialBuffer(), C->special(), 0, 1, 0, 1, 0, 1, alpha, beta), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(aType, usualGemm, (blocksPerGrid, threadsPerBlock, sharedMem, stream, A->specialBuffer(), A->specialShapeInfo(), B->specialBuffer(), B->specialShapeInfo(), C->specialBuffer(), C->specialShapeInfo(), 0, 1, 0, 1, 0, 1, alpha, beta), NUMERIC_TYPES)
NDArray::registerSpecialUse({C}, {A, B});
auto cudaResult = hipStreamSynchronize(*stream);
if (cudaResult != 0)
throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", cudaResult);
}
else {
std::vector<NDArray*> toDelete;
NDArray *pA(const_cast<NDArray*>(A)), *pB(const_cast<NDArray*>(B)), *pC(const_cast<NDArray*>(C));
bool aMcont = M == 1 || A->strideAt(0) == 1;
bool aKcont = K == 1 || A->strideAt(1) == 1;
bool bKcont = K == 1 || B->strideAt(0) == 1;
bool bNcont = N == 1 || B->strideAt(1) == 1;
bool cMcont = M == 1 || C->strideAt(0) == 1;
bool cNcont = N == 1 || C->strideAt(1) == 1;
if(!aMcont && !aKcont) {
pA = new NDArray(A->dup('f'));
toDelete.push_back(pA);
aMcont = true;
}
if(!bKcont && !bNcont) {
pB = new NDArray(B->dup('f'));
toDelete.push_back(pB);
bKcont = true;
}
if(!cMcont) {
pC = new NDArray(C->dup('f'));
toDelete.push_back(pC);
cMcont = true;
}
const bool transA = !aMcont;
const bool transB = !bKcont;
const int lda = (aMcont && aKcont) ? M : transA ? pA->strideAt(0) : pA->strideAt(1);
const int ldb = (bKcont && bNcont) ? K : transB ? pB->strideAt(0) : pB->strideAt(1);
const int ldc = (cMcont && cNcont) ? M : pC->strideAt(1);
const hipblasOperation_t transAblas = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const hipblasOperation_t transBblas = transB ? HIPBLAS_OP_T : HIPBLAS_OP_N;
NDArray::prepareSpecialUse({pC}, {pA, pB});
// choose appropriate cuda gemm api depending on data types
if(typeDouble) {
status = hipblasDgemm(*handle, transAblas, transBblas, M, N, K, &alpha, (double*)pA->specialBuffer(), lda, (double*)pB->specialBuffer(), ldb, &beta, (double*)pC->specialBuffer(), ldc);
}
else if(typeFloat) {
float alphaF(alpha), betaF(beta);
status = hipblasSgemm(*handle, transAblas, transBblas, M, N, K, &alphaF, (float*)pA->specialBuffer(), lda, (float*)pB->specialBuffer(), ldb, &betaF, (float*)pC->specialBuffer(), ldc);
}
else if(typeHalf) {
float16 alphaH(alpha), betaH(beta);
status = hipblasHgemm(*handle, transAblas, transBblas, M, N, K, &alphaH.data, (__half*)pA->specialBuffer(), lda, (__half*)pB->specialBuffer(), ldb, &betaH.data, (__half*)pC->specialBuffer(), ldc);
}
else if(typeIntFloat) {
float alphaF(alpha), betaF(beta);
status = cublasSgemmEx(*handle, transAblas, transBblas, M, N, K, &alphaF, pA->specialBuffer(), HIP_R_8I, lda, pB->specialBuffer(), HIP_R_8I, ldb, &betaF, pC->specialBuffer(), HIP_R_32F, ldc);
}
else if(typeHalfFloat) {
float alphaF(alpha), betaF(beta);
status = cublasSgemmEx(*handle, transAblas, transBblas, M, N, K, &alphaF, pA->specialBuffer(), HIP_R_16F, lda, pB->specialBuffer(), HIP_R_16F, ldb, &betaF, pC->specialBuffer(), HIP_R_32F, ldc);
}
if (status != HIPBLAS_STATUS_SUCCESS)
throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status);
NDArray::registerSpecialUse({pC}, {pA, pB});
auto cudaResult = hipStreamSynchronize(*stream);
if (cudaResult != 0)
throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", cudaResult);
if(C != pC)
C->assign(pC);
for(int i = toDelete.size() - 1; i >= 0; --i)
delete toDelete[i];
}
return C;
}
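// Illustrative usage sketch (editorial addition, not part of the original source): with two
// float32 matrices this routine reaches hipblasSgemm, while unsupported dtype combinations fall
// back to the custom usualGemm kernel above; when C == nullptr the result is allocated here and
// ownership passes to the caller. The NDArray constructor below is an assumption for
// illustration only.
//
//   NDArray a('c', {3, 4}, DataType::FLOAT32);
//   NDArray b('c', {4, 5}, DataType::FLOAT32);
//   NDArray* c = MmulHelper::mmulMxM(&a, &b, nullptr, 1.0, 0.0, 'f');   // 3x5 result
//   delete c;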
////////////////////////////////////////////////////////////////////////////
// MxN x N = M
NDArray* MmulHelper::mmulMxV(const NDArray* A, const NDArray* X, sd::NDArray* Y, const double alpha, const double beta, const char outOrder) {
int xLenDim, yLenDim(0);
if(A->rankOf() != 2)
throw std::runtime_error("MmulHelper::mmulMxV cuda: rank of A array is not equal 2 !");
if(!shape::isCommonVector(X->shapeInfo(), xLenDim))
throw std::runtime_error("MmulHelper::mmulMxV cuda: X array must be vector !");
const auto M = A->sizeAt(0);
const auto N = A->sizeAt(1);
if(Y != nullptr && !shape::isCommonVector(Y->shapeInfo(), yLenDim))
throw std::runtime_error("MmulHelper::mmulMxV cuda: Y array must be vector !");
if(X->lengthOf() != N)
throw std::runtime_error("MmulHelper::mmulMxV cuda: X vector has wrong length !");
if(Y != nullptr && Y->lengthOf() != M)
throw std::runtime_error("MmulHelper::mmulMxV cuda: Y array has wrong length !");
if(Y == nullptr)
Y = new NDArray(outOrder, {M}, DataTypeUtils::pickPairwiseResultType(A->dataType(), X->dataType()), A->getContext());
if (Y->isEmpty())
return Y;
const int incx = X->strideAt(xLenDim);
const int incy = Y->strideAt(yLenDim);
const auto aType = A->dataType();
const auto xType = X->dataType();
const auto yType = Y->dataType();
const bool AX(aType == xType), AY(aType == yType), AXY(AX && AY);
const bool typeDouble = AXY && aType == DataType::DOUBLE;
const bool typeFloat = AXY && aType == DataType::FLOAT32;
std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex());
auto handle = reinterpret_cast<hipblasHandle_t *>(A->getContext()->getCublasHandle());
auto stream = A->getContext()->getCudaStream();
auto status = hipblasSetStream(*handle, *stream);
if (status != HIPBLAS_STATUS_SUCCESS)
throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", status);
if(!typeDouble && !typeFloat) {
const int threadsPerBlock = MAX_NUM_THREADS;
const int blocksPerGrid = (M + threadsPerBlock - 1) / threadsPerBlock;
NDArray::prepareSpecialUse({Y}, {A, X});
// BUILD_TRIPLE_SELECTOR(aType, xType, yType, usualGemv, (blocksPerGrid, threadsPerBlock, stream, A->specialBuffer(), A->specialShapeInfo(), X->specialBuffer(), X->specialShapeInfo(), Y->specialBuffer(), Y->special(), incx, incy, 0, alpha, beta), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(xType, usualGemv, (blocksPerGrid, threadsPerBlock, stream, A->specialBuffer(), A->specialShapeInfo(), X->specialBuffer(), X->specialShapeInfo(), Y->specialBuffer(), Y->specialShapeInfo(), incx, incy, 0, alpha, beta), NUMERIC_TYPES)
NDArray::registerSpecialUse({Y}, {A, X});
auto cudaResult = hipStreamSynchronize(*stream);
if (cudaResult != 0)
throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", cudaResult);
}
else {
NDArray *pA(const_cast<NDArray*>(A));
bool aMcont = M == 1 || A->strideAt(0) == 1;
bool aNcont = N == 1 || A->strideAt(1) == 1;
if(!aMcont && !aNcont) {
pA = new NDArray(A->dup('f'));
aMcont = true;
}
const bool transA = !aMcont;
const int lda = (aMcont && aNcont) ? M : transA ? pA->strideAt(0) : pA->strideAt(1);
const hipblasOperation_t transAblas = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N;
NDArray::prepareSpecialUse({Y}, {pA, X});
// choose appropriate cuda gemm api depending on data types
if(typeDouble) {
status = hipblasDgemv(*handle, transAblas, transA ? N : M, transA ? M : N, &alpha, (double*)pA->specialBuffer(), lda, (double*)X->specialBuffer(), incx, &beta, (double*)Y->specialBuffer(), incy);
}
else if(typeFloat) {
float alphaF(alpha), betaF(beta);
status = hipblasSgemv(*handle, transAblas, transA ? N : M, transA ? M : N, &alphaF, (float*)pA->specialBuffer(), lda, (float*)X->specialBuffer(), incx, &betaF, (float*)Y->specialBuffer(), incy);
}
if (status != HIPBLAS_STATUS_SUCCESS)
throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", status);
auto cudaResult = hipStreamSynchronize(*stream);
if (cudaResult != 0)
throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", cudaResult);
NDArray::registerSpecialUse({Y}, {pA, X});
if(pA != A)
delete pA;
}
return Y;
}
////////////////////////////////////////////////////////////////////////////
// (X * Y) = Z[0]
NDArray* MmulHelper::dot(const NDArray* X, const NDArray* Y, sd::NDArray* Z, const double alpha, const double beta) {
int xLenDim(0), yLenDim(0);
if(!shape::isCommonVector(X->shapeInfo(), xLenDim))
throw std::runtime_error("MmulHelper::dot cuda: X array must be vector !");
if(!shape::isCommonVector(Y->shapeInfo(), yLenDim))
throw std::runtime_error("MmulHelper::dot cuda: Y array must be vector !");
if(Z != nullptr && !Z->isScalar())
throw std::runtime_error("MmulHelper::dot cuda: Z array must be scalar !");
const auto length = X->lengthOf();
if(Y->lengthOf() != length)
throw std::runtime_error("MmulHelper::dot cuda: lengths of input vectors are different !");
if(Z == nullptr)
Z = new NDArray(DataTypeUtils::pickPairwiseResultType(X->dataType(), Y->dataType()), X->getContext());
const Nd4jLong incx = X->strideAt(xLenDim);
const Nd4jLong incy = Y->strideAt(yLenDim);
const auto xType = X->dataType();
const auto yType = Y->dataType();
const auto zType = Z->dataType();
if(!X->isActualOnDeviceSide()) X->syncToDevice();
if(!Y->isActualOnDeviceSide()) Y->syncToDevice();
if(!Z->isActualOnDeviceSide()) Z->syncToDevice();
hipStream_t* stream = X->getContext()->getCudaStream();
dim3 threadsPerBlock(512);
dim3 blocksPerGrid(1);
if (length > 512)
threadsPerBlock.x = math::nd4j_ceil<double, int>(static_cast<double>(length) / 512);
NDArray::prepareSpecialUse({Z}, {X, Y});
//BUILD_TRIPLE_SELECTOR(xType, yType, zType, usualDot, (blocksPerGrid, threadsPerBlock, stream, length, alpha, X->specialBuffer(), incx, Y->specialBuffer(), incy, beta, Z->specialBuffer()), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(xType, usualDot, (blocksPerGrid, threadsPerBlock, stream, length, alpha, X->specialBuffer(), incx, Y->specialBuffer(), incy, beta, Z->specialBuffer()), NUMERIC_TYPES)
auto cudaResult = hipStreamSynchronize(*stream);
if (cudaResult != 0) throw cuda_exception::build("MmulHelper::dot cuda failed !", cudaResult);
NDArray::registerSpecialUse({Z}, {X, Y});
return Z;
}
//////////////////////////////////////////////////////////////////////////////
// [bS,M,K] x [bS,K,N] = [bS,M,N]
// [bS,M,K] x [K,N] = [bS,M,N]
// [M,K] x [bS,K,N] = [bS,M,N]
// bS could stand for several axes
template <typename T1, typename T2, typename T3>
static __global__ void batchedCudaGemm(const void* vA, const Nd4jLong* aShapeInfo, const void* vB, const Nd4jLong* bShapeInfo, void* vC, const Nd4jLong* cShapeInfo,
const int* aBatchDims, const int* bBatchDims, const int* cBatchDims,
const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis, const int cNaxis,
const double alpha, const double beta) {
const T1* A = reinterpret_cast<const T1*>(vA);
const T2* B = reinterpret_cast<const T2*>(vB);
T3* C = reinterpret_cast< T3*>(vC);
__shared__ bool betaPresent;
__shared__ int aRank, bRank, cRank, K, *coords;
__shared__ Nd4jLong cLen, totalThreads;
__shared__ T3 alphaZ, betaZ;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
coords = reinterpret_cast<int*>(shmem);
cLen = shape::length(cShapeInfo);
K = shape::shapeOf(const_cast<Nd4jLong*>(aShapeInfo))[aKaxis];
totalThreads = gridDim.x * blockDim.x;
aRank = shape::rank(aShapeInfo);
bRank = shape::rank(bShapeInfo);
cRank = shape::rank(cShapeInfo);
betaPresent = beta;
alphaZ = alpha;
betaZ = beta;
}
__syncthreads();
auto aCoords = coords + threadIdx.x * (aRank + bRank + cRank);
auto bCoords = aCoords + aRank;
auto cCoords = bCoords + bRank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < cLen; i += totalThreads) {
// evaluate C coordinates
shape::index2coords(i, cShapeInfo, cCoords);
// calculate index of current batch
Nd4jLong batchInd;
if(cBatchDims != nullptr)
batchInd = shape::coords2index(cShapeInfo, cBatchDims, cRank - 2, cCoords);
// evaluate A coordinates
if(aBatchDims != nullptr)
shape::index2coords(batchInd, aShapeInfo, aBatchDims, aRank - 2, aCoords);
aCoords[aMaxis] = cCoords[cMaxis];
aCoords[aKaxis] = 0;
// evaluate B coordinates
if(bBatchDims != nullptr)
shape::index2coords(batchInd, bShapeInfo, bBatchDims, bRank - 2, bCoords);
bCoords[bKaxis] = 0;
bCoords[bNaxis] = cCoords[cNaxis];
auto aOffset = shape::getOffset(aShapeInfo, aCoords);
auto bOffset = shape::getOffset(bShapeInfo, bCoords);
T3 val = A[aOffset] * B[bOffset]; // first iteration
for (uint j = 1; j < K; ++j) { // rest iterations
aOffset += shape::stride(aShapeInfo)[aKaxis];
bOffset += shape::stride(bShapeInfo)[bKaxis];
val = val + A[aOffset] * B[bOffset];
}
auto cOffset = shape::getOffset(cShapeInfo, cCoords);
if(betaPresent)
C[cOffset] = alphaZ * val + betaZ * C[cOffset];
else
C[cOffset] = alphaZ * val;
}
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
__host__ static void batchedGemm(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, hipStream_t *stream, const void* vA, const Nd4jLong* aShapeInfo, const void* vB, const Nd4jLong* bShapeInfo, void* vC, const Nd4jLong* cShapeInfo, const int* aBatchDims, const int* bBatchDims, const int* cBatchDims, const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis, const int cNaxis, const double alpha, const double beta) {
hipLaunchKernelGGL(( batchedCudaGemm<T1,T2,T3>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vA, aShapeInfo, vB, bShapeInfo, vC, cShapeInfo, aBatchDims, bBatchDims, cBatchDims, aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta);
}
///////////////////////////////////////////////////////////////////
NDArray* MmulHelper::mmulNxN(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta, const char outOrder) {
const int aRank = A->rankOf();
const int bRank = B->rankOf();
// input ranks validation
if(aRank > bRank && bRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !");
else if(bRank > aRank && aRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !");
else if (aRank == bRank ) {
for(int i = 0; i < aRank - 2; ++i)
if(A->sizeAt(i) != B->sizeAt(i))
throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
}
if(A->sizeAt(-1) != B->sizeAt(-2))
throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
// validation of C array
std::vector<Nd4jLong> cExpectedShape = aRank > bRank ? A->getShapeAsVector() : B->getShapeAsVector();
cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2);
cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1);
if(C != nullptr ) {
if(!C->isSameShape(cExpectedShape))
throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix multiplication !");
}
else
C = new NDArray(outOrder, cExpectedShape, DataTypeUtils::pickPairwiseResultType(A->dataType(), B->dataType()), A->getContext());
if (C->isEmpty())
return C;
const int cRank = C->rankOf();
const int aMaxis(aRank-2), aKaxis(aRank-1), bKaxis(bRank-2), bNaxis(bRank-1), cMaxis(cRank-2), cNaxis(cRank-1);
const int threadsPerBlock = MAX_NUM_THREADS / 8;
const int blocksPerGrid = (C->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * (aRank + bRank + cRank) + 128;
PointersManager manager(A->getContext(), "MmulHelper::mmulNxN");
const int *aBatchDims(nullptr), *bBatchDims(nullptr), *cBatchDims(nullptr);
if(aRank > 2)
aBatchDims = reinterpret_cast<int*>(manager.replicatePointer(ShapeUtils::evalDimsToExclude(aRank, {aMaxis, aKaxis}).data(), (aRank - 2) * sizeof(int)));
if(bRank > 2)
bBatchDims = reinterpret_cast<int*>(manager.replicatePointer(ShapeUtils::evalDimsToExclude(bRank, {bKaxis, bNaxis}).data(), (bRank - 2) * sizeof(int)));
if(cRank > 2)
cBatchDims = reinterpret_cast<int*>(manager.replicatePointer(ShapeUtils::evalDimsToExclude(cRank, {cMaxis, cNaxis}).data(), (cRank - 2) * sizeof(int)));
NDArray::prepareSpecialUse({C}, {A, B});
// BUILD_TRIPLE_SELECTOR(A->dataType(), b->dataType(), C->dataType(), batchedGemm, (blocksPerGrid, threadsPerBlock, A->getContext()->getCudaStream(), A->specialBuffer(), A->specialShapeInfo(), B->specialBuffer(), B->specialShapeInfo(), C->specialBuffer(), C->special(), aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(A->dataType(), batchedGemm, (blocksPerGrid, threadsPerBlock, sharedMem, A->getContext()->getCudaStream(), A->specialBuffer(), A->specialShapeInfo(), B->specialBuffer(), B->specialShapeInfo(), C->specialBuffer(), C->specialShapeInfo(), aBatchDims, bBatchDims, cBatchDims, aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta), NUMERIC_TYPES)
NDArray::registerSpecialUse({C}, {A, B});
manager.synchronize();
return C;
}
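///////////////////////////////////////////////////////////////////
// Shape-rule sketch for mmulNxN (illustrative only; NDArrayFactory is an assumption,
// it is not part of this translation unit). The batch axes of a rank-2 operand are
// broadcast, exactly as the comments above the batched kernel describe.
/*
    auto a  = NDArrayFactory::create<float>('c', {2, 3, 4});            // bS=2, M=3, K=4
    auto b  = NDArrayFactory::create<float>('c', {2, 4, 5});            // bS=2, K=4, N=5
    auto c  = MmulHelper::mmulNxN(&a, &b, nullptr, 1.0, 0.0, 'c');      // c has shape [2,3,5]
    auto b2 = NDArrayFactory::create<float>('c', {4, 5});               // rank-2 operand
    auto c2 = MmulHelper::mmulNxN(&a, &b2, nullptr, 1.0, 0.0, 'c');     // also [2,3,5]
    delete c; delete c2;
*/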
/*
//////////////////////////////////////////////////////////////////////////////
// MXN x N = M
template <typename T1, typename T2, typename T3>
static __global__ void usualCudaGemv(const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vX, const int incx, const double beta, void* vY, const int incy) {
T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA));
T2* X = reinterpret_cast<T2*>(const_cast<void*>(vX));
T3* Y = reinterpret_cast<T3*>(vY);
__shared__ T3 alphaZ, betaZ;
__shared__ Nd4jLong strideArow, strideAcol;
const int row = blockIdx.x * blockDim.x + threadIdx.x;
if(row == 0) {
alphaZ = alpha;
betaZ = beta;
if(transA) { strideArow = lda; strideAcol = 1; } else { strideArow = 1; strideAcol = lda; }
}
__syncthreads();
T3 val = 0;
if (row < M)
for (int i = 0; i < N; i++)
val = val + A[row * strideArow + i * strideAcol] * X[i * incx];
Y[row * incy] = alphaZ * val + betaZ * Y[row * incy];
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
__host__ static void usualGemv(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vX, const int incx, const double beta, void* vY, const int incy) {
usualCudaGemv<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(transA, M, N, alpha, vA, lda, vX, incx, beta, vY, incy);
}
*/
/*
//////////////////////////////////////////////////////////////////////////////
MXK x KxN = MxN
C array must be in f order
template <typename T1, typename T2, typename T3>
static __global__ void usualCudaGemm(const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc) {
T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA));
T2* B = reinterpret_cast<T2*>(const_cast<void*>(vB));
T3* C = reinterpret_cast<T3*>(vC);
__shared__ T3 alphaZ, betaZ;
__shared__ Nd4jLong strideArow, strideAcol, strideBrow, strideBcol;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row == 0 && col == 0) {
alphaZ = alpha;
betaZ = beta;
if(transA) { strideArow = lda; strideAcol = 1; } else { strideArow = 1; strideAcol = lda; }
if(transB) { strideBrow = ldb; strideBcol = 1; } else { strideBrow = 1; strideBcol = ldb; }
}
__syncthreads();
T3 val = 0;
if (row < M && col < N)
for (int i = 0; i < K; i++)
val = val + A[row * strideArow + i * strideAcol] * B[i * strideBrow + col * strideBcol];
C[row + col * ldc] = alphaZ * val + betaZ * C[row + col * ldc];
}
//////////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
__host__ static void usualGemm(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc) {
usualCudaGemm<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(transA, transB, M, N, K, alpha, vA, lda, vB, ldb, beta, vC, ldc);
}
*/
//////////////////////////////////////////////////////////////////////////
/*
NDArray* MmulHelper::mmulNxNold1(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta, const char outOrder) {
const int aRank = A->rankOf();
const int bRank = B->rankOf();
// input ranks validation
if(aRank > bRank && bRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !");
else if(bRank > aRank && aRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !");
else if (aRank == bRank ) {
for(int i = 0; i < aRank - 2; ++i)
if(A->sizeAt(i) != B->sizeAt(i))
throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
}
if(A->sizeAt(-1) != B->sizeAt(-2))
throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
// validation of C array
std::vector<Nd4jLong> cExpectedShape = aRank > bRank ? A->getShapeAsVector() : B->getShapeAsVector();
cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2);
cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1);
if(C != nullptr ) {
if(!C->isSameShape(cExpectedShape))
throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix multiplication !");
}
else {
C = new NDArray(outOrder, cExpectedShape, B->dataType());
}
// multiplication
const std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(C->rankOf(), {-2, -1});
const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(C->shapeInfo(), dimsToExclude);
std::vector<Nd4jLong> idxRanges(2 * C->rankOf());
// #pragma omp parallel for schedule(guided) firstprivate(idxRanges)
for(Nd4jLong i = 0; i < numOfSubArrs; ++i) {
ShapeUtils::evalIdxRangesForSubArr(i, C->shapeInfo(), dimsToExclude, idxRanges.data());
NDArray cSubArr = (*C)(idxRanges);
if(aRank > bRank) {
NDArray aSubArr = (*A)(idxRanges);
mmulMxM(&aSubArr, B, &cSubArr, 1., 0., outOrder);
}
else if(bRank > aRank) {
NDArray bSubArr = (*B)(idxRanges);
mmulMxM(A, &bSubArr, &cSubArr, 1., 0, outOrder);
}
else {
NDArray aSubArr = (*A)(idxRanges);
NDArray bSubArr = (*B)(idxRanges);
mmulMxM(&aSubArr, &bSubArr, &cSubArr, 1., 0., outOrder);
}
}
return C;
}
*/
//////////////////////////////////////////////////////////////////////////
// [bS,M,K] x [bS,K,N] = [bS,M,N]
// [bS,M,K] x [K,N] = [bS,M,N]
// [M,K] x [bS,K,N] = [bS,M,N]
// bS could stand for several axes
/*
NDArray* MmulHelper::mmulNxNold2(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta, const char outOrder) {
const int aRank = A->rankOf();
const int bRank = B->rankOf();
// input ranks validation
if(aRank > bRank && bRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !");
else if(bRank > aRank && aRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !");
else if (aRank == bRank ) {
for(int i = 0; i < aRank - 2; ++i)
if(A->sizeAt(i) != B->sizeAt(i))
throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
}
if(A->sizeAt(-1) != B->sizeAt(-2))
throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
// validation of C array
std::vector<Nd4jLong> cExpectedShape = aRank > bRank ? A->getShapeAsVector() : B->getShapeAsVector();
cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2);
cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1);
if(C != nullptr ) {
if(!C->isSameShape(cExpectedShape))
throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix multiplication !");
}
else
C = new NDArray(outOrder, cExpectedShape, B->dataType());
const int cRank = C->rankOf();
const auto M = A->sizeAt(-2);
const auto K = A->sizeAt(-1);
const auto N = B->sizeAt(-1);
NDArray *pA(const_cast<NDArray*>(A)), *pB(const_cast<NDArray*>(B)), *pC(const_cast<NDArray*>(C));
std::vector<NDArray*> toDelete;
bool aMcont = M == 1 || A->strideAt(-2) == 1;
bool aKcont = K == 1 || A->strideAt(-1) == 1;
bool bKcont = K == 1 || B->strideAt(-2) == 1;
bool bNcont = N == 1 || B->strideAt(-1) == 1;
bool cMcont = M == 1 || C->strideAt(-2) == 1;
bool cNcont = N == 1 || C->strideAt(-1) == 1;
if(!aMcont && !aKcont) {
pA = new NDArray(A->dup('c'));
toDelete.push_back(pA);
aKcont = true;
}
if(!bKcont && !bNcont) {
pB = new NDArray(B->dup('c'));
toDelete.push_back(pB);
bNcont = true;
}
std::vector<int> permut(cRank);
if(!cMcont) {
std::iota(permut.begin(), permut.end(), 0);
permut[cRank - 2] = cRank - 1;
permut[cRank - 1] = cRank - 2; // swap two last dimensions [..., M,N] -> [..., N,M]
auto Cpermut = C->permute(permut);
pC = new NDArray('c', Cpermut.getShapeAsVector(), Cpermut.dataType(), A->getContext());
pC->assign(Cpermut);
toDelete.push_back(pC);
cMcont = true;
}
const auto aType = pA->dataType();
const auto bType = pB->dataType();
const auto cType = pC->dataType();
const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC);
bool badTypes = false;
hipDataType cudaType, cudaAType, cudaBType, cudaCType;
if(ABC && aType == DataType::HALF) {
cudaType = cudaAType = cudaBType = cudaCType = HIP_R_16F;
}
else if(ABC && aType == DataType::FLOAT32) {
cudaType = cudaAType = cudaBType = cudaCType = HIP_R_32F;
}
else if(ABC && aType == DataType::DOUBLE) {
cudaType = cudaAType = cudaBType = cudaCType = HIP_R_64F;
}
else if(AB && cType == DataType::FLOAT32 && aType == DataType::INT8) {
cudaType = cudaCType = HIP_R_32F;
cudaAType = cudaBType = HIP_R_8I;
}
else if(AB && cType == DataType::FLOAT32 && aType == DataType::HALF) {
cudaType = cudaCType = HIP_R_32F;
cudaAType = cudaBType = HIP_R_16F;
}
else
badTypes = true;
const int bS = pC->lengthOf() / (M*N);
const std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(cRank, {-2, -1});
NDArray::prepareSpecialUse({pC}, {pA, pB});
if(!badTypes) {
std::vector<Nd4jLong> subArrOffsets(bS);
std::vector<Nd4jLong> subArrShapeInfo(shape::shapeInfoLength(2)); // all sub-arrays have rank = 2
std::vector<void*> aSubArrs(bS), bSubArrs(bS), cSubArrs(bS);
if(aRank > 2)
shape::calcSubArrsShapeInfoAndOffsets(pA->shapeInfo(), bS, dimsToExclude.size(), dimsToExclude.data(), subArrShapeInfo.data(), subArrOffsets.data());
for (int i = 0; i < bS; ++i)
aSubArrs[i] = aRank == 2 ? pA->specialBuffer() : pA->specialBuffer() + subArrOffsets[i] * pA->sizeOfT();
if(bRank > 2)
shape::calcSubArrsShapeInfoAndOffsets(pB->shapeInfo(), bS, dimsToExclude.size(), dimsToExclude.data(), subArrShapeInfo.data(), subArrOffsets.data());
for (int i = 0; i < bS; ++i)
bSubArrs[i] = bRank == 2 ? pB->specialBuffer() : pB->specialBuffer() + subArrOffsets[i] * pB->sizeOfT();
shape::calcSubArrsShapeInfoAndOffsets(pC->shapeInfo(), bS, dimsToExclude.size(), dimsToExclude.data(), subArrShapeInfo.data(), subArrOffsets.data());
for (int i = 0; i < bS; ++i)
cSubArrs[i] = pC->specialBuffer() + subArrOffsets[i] * pC->sizeOfT();
PointersManager manager(A->getContext(), "mmulNxN");
const void** aSubArrsCuda = reinterpret_cast<const void **>(manager.replicatePointer(aSubArrs.data(), aSubArrs.size() * sizeof(void*)));
const void** bSubArrsCuda = reinterpret_cast<const void **>(manager.replicatePointer(bSubArrs.data(), bSubArrs.size() * sizeof(void*)));
void** cSubArrsCuda = reinterpret_cast< void **>(manager.replicatePointer(cSubArrs.data(), cSubArrs.size() * sizeof(void*)));
const bool transA = !aMcont;
const bool transB = !bKcont;
const int lda = (aMcont && aKcont) ? M : transA ? pA->strideAt(-2) : pA->strideAt(-1);
const int ldb = (bKcont && bNcont) ? K : transB ? pB->strideAt(-2) : pB->strideAt(-1);
const int ldc = (cMcont && cNcont) ? M : C != pC ? pC->strideAt(-2) : pC->strideAt(-1);
const hipblasOperation_t transAblas = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const hipblasOperation_t transBblas = transB ? HIPBLAS_OP_T : HIPBLAS_OP_N;
union Coeff {__half _h; float _f; double _d; };
Coeff uAlpha, uBeta;
if(cudaType == HIP_R_16F) {
uAlpha._h = alpha;
uBeta._h = beta;
}
else if(cudaType == HIP_R_32F) {
uAlpha._f = alpha;
uBeta._f = beta;
}
else if(cudaType == HIP_R_64F) {
uAlpha._d = alpha;
uBeta._d = beta;
}
auto handle = reinterpret_cast<hipblasHandle_t *>(A->getContext()->getCublasHandle());
auto stream = A->getContext()->getCudaStream();
auto status = hipblasSetStream(*handle, *stream);
if (status != HIPBLAS_STATUS_SUCCESS)
throw cuda_exception::build("MmulHelper::mmulNxN cuda failed !", status);
status = hipblasGemmBatchedEx(*handle, transAblas, transBblas, M, N, K, &uAlpha, aSubArrsCuda, cudaAType, lda, bSubArrsCuda, cudaBType, ldb, &uBeta, cSubArrsCuda, cudaCType, ldc, bS, cudaType, HIPBLAS_GEMM_DEFAULT);
if (status != HIPBLAS_STATUS_SUCCESS)
throw cuda_exception::build("MmulHelper::mmulNxN cuda failed !", status);
auto cudaResult = hipStreamSynchronize(*stream);
if (cudaResult != 0)
throw cuda_exception::build("MmulHelper::mmulNxN cuda failed !", cudaResult);
}
else {
std::vector<Nd4jLong> idxRanges(2 * pC->rankOf());
for(Nd4jLong i = 0; i < bS; ++i) {
ShapeUtils::evalIdxRangesForSubArr(i, pC->shapeInfo(), dimsToExclude, idxRanges.data());
NDArray cSubArr = (*pC)(idxRanges);
if(aRank > bRank) {
NDArray aSubArr = (*pA)(idxRanges);
mmulMxM(&aSubArr, pB, &cSubArr, 1., 0., pC->ordering());
}
else if(bRank > aRank) {
NDArray bSubArr = (*pB)(idxRanges);
mmulMxM(pA, &bSubArr, &cSubArr, 1., 0, pC->ordering());
}
else {
NDArray aSubArr = (*pA)(idxRanges);
NDArray bSubArr = (*pB)(idxRanges);
mmulMxM(&aSubArr, &bSubArr, &cSubArr, 1., 0., pC->ordering());
}
}
}
NDArray::registerSpecialUse({pC}, {pA, pB});
if(C != pC)
C->assign(pC->permute(permut));
for(int i = toDelete.size() - 1; i >= 0; --i)
delete toDelete[i];
return C;
}
*/
//BUILD_TRIPLE_TEMPLATE(template void usualGemm, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES);
//BUILD_TRIPLE_TEMPLATE(template void usualGemv, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vB, const int incx, const double beta, void* vC, const int incy), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES);
//BUILD_TRIPLE_TEMPLATE(template void usualDot, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES);
} | 5969edf1fcfd0eacd4aa07b6ffa68e0a09afe58d.cu | /*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
//
// @author [email protected]
// @author Yurii Shyrma ([email protected])
//
#include <exceptions/cuda_exception.h>
#include <cublas_v2.h>
#include "../MmulHelper.h"
#include <ops/specials_cuda.h>
#include <helpers/ShapeUtils.h>
#include <helpers/PointersManager.h>
#include <numeric>
namespace sd {
//////////////////////////////////////////////////////////////////////////////
// MxK x KxN = MxN -> actual sequence of axes doesn't matter
template <typename T1, typename T2, typename T3>
static __global__ void usualCudaGemm(const void* vA, const Nd4jLong* aShapeInfo, const void* vB, const Nd4jLong* bShapeInfo, void* vC, const Nd4jLong* cShapeInfo,
const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis, const int cNaxis,
const double alpha, const double beta) {
const T1* A = reinterpret_cast<const T1*>(vA);
const T2* B = reinterpret_cast<const T2*>(vB);
T3* C = reinterpret_cast< T3*>(vC);
__shared__ int K, *coords;
__shared__ bool betaPresent;
__shared__ Nd4jLong cLen, totalThreads;
__shared__ T3 alphaZ, betaZ;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
coords = reinterpret_cast<int*>(shmem);
cLen = shape::length(cShapeInfo);
K = shape::shapeOf(const_cast<Nd4jLong*>(aShapeInfo))[aKaxis];
betaPresent = beta;
totalThreads = gridDim.x * blockDim.x;
alphaZ = alpha;
betaZ = beta;
}
__syncthreads();
auto aCoords = coords + threadIdx.x * 6; // 6 = (aRank + bRank + cRank)
auto bCoords = aCoords + 2;
auto cCoords = bCoords + 2;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < cLen; i += totalThreads) {
// evaluate C coordinates
shape::index2coords(i, cShapeInfo, cCoords);
// evaluate A coordinates
aCoords[aMaxis] = cCoords[cMaxis];
aCoords[aKaxis] = 0;
// evaluate B coordinates
bCoords[bKaxis] = 0;
bCoords[bNaxis] = cCoords[cNaxis];
auto aOffset = shape::getOffset(aShapeInfo, aCoords);
auto bOffset = shape::getOffset(bShapeInfo, bCoords);
T3 val = A[aOffset] * B[bOffset]; // first iteration
for (uint j = 1; j < K; ++j) { // rest iterations
aOffset += shape::stride(aShapeInfo)[aKaxis];
bOffset += shape::stride(bShapeInfo)[bKaxis];
val = val + A[aOffset] * B[bOffset];
}
auto cOffset = shape::getOffset(cShapeInfo, cCoords);
if(betaPresent)
C[cOffset] = alphaZ * val + betaZ * C[cOffset];
else
C[cOffset] = alphaZ * val;
}
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
__host__ static void usualGemm(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, cudaStream_t *stream, const void* vA, const Nd4jLong* aShapeInfo, const void* vB, const Nd4jLong* bShapeInfo, void* vC, const Nd4jLong* cShapeInfo, const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis, const int cNaxis, const double alpha, const double beta) {
usualCudaGemm<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vA, aShapeInfo, vB, bShapeInfo, vC, cShapeInfo, aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta);
}
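//////////////////////////////////////////////////////////////////////////////
// Launch-configuration sketch for usualGemm (illustrative only; aBuf/bBuf/cBuf and the
// shape-info pointers are hypothetical device buffers). Every thread keeps 6 ints of
// coordinate scratch in dynamic shared memory: 2 for A, 2 for B and 2 for C, which is
// exactly how mmulMxM below sizes its launch.
/*
    const int threadsPerBlock = 256;                                               // hypothetical
    const int blocksPerGrid   = (M * N + threadsPerBlock - 1) / threadsPerBlock;   // one thread per element of C
    const int sharedMem       = threadsPerBlock * sizeof(int) * 6 + 128;           // 256*4*6 + 128 = 6272 bytes
    usualGemm<float, float, float>(blocksPerGrid, threadsPerBlock, sharedMem, stream,
                                   aBuf, aShapeInfo, bBuf, bShapeInfo, cBuf, cShapeInfo,
                                   0, 1, 0, 1, 0, 1, 1.0, 0.0);                    // rank-2 axes: M=0, K/N=1
*/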
////////////////////////////////////////////////////////////////////////
// MxN x N = M -> actual sequence of {M,N} axes doesn't matter
template <typename T1, typename T2, typename T3>
static __global__ void usualCudaGemv(const void* vA, const Nd4jLong* aShapeInfo, const void* vX, const Nd4jLong* xShapeInfo, void* vY, const Nd4jLong* yShapeInfo,
const int incx, const int incy, const int aMaxis, const double alpha, const double beta) {
const T1* A = reinterpret_cast<const T1*>(vA);
const T2* X = reinterpret_cast<const T2*>(vX);
T3* Y = reinterpret_cast< T3*>(vY);
__shared__ int M, N;
__shared__ bool betaPresent;
__shared__ Nd4jLong cLen, totalThreads, aNstride, aMstride;
__shared__ T3 alphaZ, betaZ;
if (threadIdx.x == 0) {
N = shape::length(xShapeInfo);
M = shape::length(yShapeInfo);
aMstride = shape::stride(aShapeInfo)[aMaxis];
aNstride = shape::stride(aShapeInfo)[aMaxis == 0 ? 1 : 0];
totalThreads = gridDim.x * blockDim.x;
betaPresent = beta;
alphaZ = alpha;
betaZ = beta;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < M; i += totalThreads) {
// evaluate offsets
auto aOffset = i * aMstride;
auto xOffset = 0;
T3 val = A[aOffset] * X[xOffset]; // first iteration
for (uint j = 1; j < N; ++j) { // rest iterations
aOffset += aNstride;
xOffset += incx;
val = val + A[aOffset] * X[xOffset];
}
auto yOffset = i * incy;
if(betaPresent)
Y[yOffset] = alphaZ * val + betaZ * Y[yOffset];
else
Y[yOffset] = alphaZ * val;
}
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
__host__ static void usualGemv(const int blocksPerGrid, const int threadsPerBlock, cudaStream_t *stream, const void* vA, const Nd4jLong* aShapeInfo, const void* vX, const Nd4jLong* xShapeInfo, void* vY, const Nd4jLong* yShapeInfo, const int incx, const int incy, const int aMaxis, const double alpha, const double beta) {
usualCudaGemv<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, 512, *stream>>>(vA, aShapeInfo, vX, xShapeInfo, vY, yShapeInfo, incx, incy, aMaxis, alpha, beta);
}
//////////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
static __global__ void usualCudaDot(const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ) {
T1* X = reinterpret_cast<T1*>(const_cast<void*>(vX));
T2* Y = reinterpret_cast<T2*>(const_cast<void*>(vY));
T3* Z = reinterpret_cast<T3*>(vZ);
extern __shared__ unsigned char shmem[];
auto pairwiseMul = reinterpret_cast<T3*>(shmem);
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < length)
pairwiseMul[tid] = X[tid * incx] * Y[tid * incy];
__syncthreads();
if(tid == 0) {
T3 sum = 0;
for(Nd4jLong i = 0; i < length; ++i)
sum = sum + pairwiseMul[i];
if(beta)
*Z = (T3)alpha * sum + (T3)beta * *Z;
else
*Z = (T3)alpha * sum;
}
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
__host__ static void usualDot(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ) {
usualCudaDot<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, length*sizeof(T3) + 128, *stream>>>(length, alpha, vX, incx, vY, incy, beta, vZ);
}
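//////////////////////////////////////////////////////////////////////////////
// Sizing sketch for usualDot (illustrative only; xBuf/yBuf/zBuf are hypothetical device
// buffers). The kernel stages one partial product per element in dynamic shared memory
// and reduces them in thread 0, so the wrapper above requests length*sizeof(T3) + 128 bytes.
/*
    const Nd4jLong length = 300;                     // fits in a single 512-thread block
    usualDot<float, float, float>(dim3(1), dim3(512), stream, length,
                                  1.0, xBuf, 1, yBuf, 1, 0.0, zBuf);
    // dynamic shared memory requested: 300 * sizeof(float) + 128 = 1328 bytes
*/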
//////////////////////////////////////////////////////////////////////////////
// MxK x KxN = MxN
NDArray* MmulHelper::mmulMxM(const NDArray* A, const NDArray* B, NDArray* C, double alpha, double beta, const char outOrder) {
if(A->rankOf() != 2)
throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of A array is not equal 2 !");
if(B->rankOf() != 2)
throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of B array is not equal 2 !");
const auto M = A->sizeAt(0);
const auto K = A->sizeAt(1);
const auto N = B->sizeAt(1);
if(C != nullptr && C->rankOf() != 2)
throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of C array is not equal 2 !");
if(B->sizeAt(0) != K)
throw std::runtime_error("MmulHelper::mmulMxM cuda: B array has wrong number of rows !");
if(C != nullptr && C->sizeAt(0) != M)
throw std::runtime_error("MmulHelper::mmulMxM cuda: C array has wrong number of rows !");
if(C != nullptr && C->sizeAt(1) != N)
throw std::runtime_error("MmulHelper::mmulMxM cuda: C array has wrong number of columns !");
if(C == nullptr)
C = new NDArray(outOrder, {M,N}, DataTypeUtils::pickPairwiseResultType(A->dataType(), B->dataType()), A->getContext());
if (C->isEmpty())
return C;
const int major = Environment::getInstance().capabilities()[AffinityManager::currentDeviceId()].first();
const auto aType = A->dataType();
const auto bType = B->dataType();
const auto cType = C->dataType();
const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC);
const bool typeDouble = ABC && aType == DataType::DOUBLE;
const bool typeFloat = ABC && aType == DataType::FLOAT32;
const bool typeHalf = ABC && aType == DataType::HALF && major >= 6;
const bool typeIntFloat = AB && aType == DataType::INT8 && cType == DataType::FLOAT32 && major >= 6;
const bool typeHalfFloat = AB && aType == DataType::HALF && cType == DataType::FLOAT32 && major >= 6;
std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex());
auto handle = reinterpret_cast<cublasHandle_t *>(A->getContext()->getCublasHandle());
auto stream = A->getContext()->getCudaStream();
auto status = cublasSetStream_v2(*handle, *stream);
if (status != CUBLAS_STATUS_SUCCESS)
throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status);
if(!typeDouble && !typeFloat && !typeHalf && !typeIntFloat && !typeHalfFloat) {
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (C->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * 6 + 128; // 6 = aRank + bRank + cRank
NDArray::prepareSpecialUse({C}, {A, B});
// BUILD_TRIPLE_SELECTOR(aType, bType, cType, usualGemm, (blocksPerGrid, threadsPerBlock, sharedMem, stream, A->specialBuffer(), A->specialShapeInfo(), B->specialBuffer(), B->specialShapeInfo(), C->specialBuffer(), C->special(), 0, 1, 0, 1, 0, 1, alpha, beta), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(aType, usualGemm, (blocksPerGrid, threadsPerBlock, sharedMem, stream, A->specialBuffer(), A->specialShapeInfo(), B->specialBuffer(), B->specialShapeInfo(), C->specialBuffer(), C->specialShapeInfo(), 0, 1, 0, 1, 0, 1, alpha, beta), NUMERIC_TYPES)
NDArray::registerSpecialUse({C}, {A, B});
auto cudaResult = cudaStreamSynchronize(*stream);
if (cudaResult != 0)
throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", cudaResult);
}
else {
std::vector<NDArray*> toDelete;
NDArray *pA(const_cast<NDArray*>(A)), *pB(const_cast<NDArray*>(B)), *pC(const_cast<NDArray*>(C));
bool aMcont = M == 1 || A->strideAt(0) == 1;
bool aKcont = K == 1 || A->strideAt(1) == 1;
bool bKcont = K == 1 || B->strideAt(0) == 1;
bool bNcont = N == 1 || B->strideAt(1) == 1;
bool cMcont = M == 1 || C->strideAt(0) == 1;
bool cNcont = N == 1 || C->strideAt(1) == 1;
if(!aMcont && !aKcont) {
pA = new NDArray(A->dup('f'));
toDelete.push_back(pA);
aMcont = true;
}
if(!bKcont && !bNcont) {
pB = new NDArray(B->dup('f'));
toDelete.push_back(pB);
bKcont = true;
}
if(!cMcont) {
pC = new NDArray(C->dup('f'));
toDelete.push_back(pC);
cMcont = true;
}
const bool transA = !aMcont;
const bool transB = !bKcont;
const int lda = (aMcont && aKcont) ? M : transA ? pA->strideAt(0) : pA->strideAt(1);
const int ldb = (bKcont && bNcont) ? K : transB ? pB->strideAt(0) : pB->strideAt(1);
const int ldc = (cMcont && cNcont) ? M : pC->strideAt(1);
const cublasOperation_t transAblas = transA ? CUBLAS_OP_T : CUBLAS_OP_N;
const cublasOperation_t transBblas = transB ? CUBLAS_OP_T : CUBLAS_OP_N;
NDArray::prepareSpecialUse({pC}, {pA, pB});
// choose appropriate cuda gemm api depending on data types
if(typeDouble) {
status = cublasDgemm(*handle, transAblas, transBblas, M, N, K, &alpha, (double*)pA->specialBuffer(), lda, (double*)pB->specialBuffer(), ldb, &beta, (double*)pC->specialBuffer(), ldc);
}
else if(typeFloat) {
float alphaF(alpha), betaF(beta);
status = cublasSgemm(*handle, transAblas, transBblas, M, N, K, &alphaF, (float*)pA->specialBuffer(), lda, (float*)pB->specialBuffer(), ldb, &betaF, (float*)pC->specialBuffer(), ldc);
}
else if(typeHalf) {
float16 alphaH(alpha), betaH(beta);
status = cublasHgemm(*handle, transAblas, transBblas, M, N, K, &alphaH.data, (__half*)pA->specialBuffer(), lda, (__half*)pB->specialBuffer(), ldb, &betaH.data, (__half*)pC->specialBuffer(), ldc);
}
else if(typeIntFloat) {
float alphaF(alpha), betaF(beta);
status = cublasSgemmEx(*handle, transAblas, transBblas, M, N, K, &alphaF, pA->specialBuffer(), CUDA_R_8I, lda, pB->specialBuffer(), CUDA_R_8I, ldb, &betaF, pC->specialBuffer(), CUDA_R_32F, ldc);
}
else if(typeHalfFloat) {
float alphaF(alpha), betaF(beta);
status = cublasSgemmEx(*handle, transAblas, transBblas, M, N, K, &alphaF, pA->specialBuffer(), CUDA_R_16F, lda, pB->specialBuffer(), CUDA_R_16F, ldb, &betaF, pC->specialBuffer(), CUDA_R_32F, ldc);
}
if (status != CUBLAS_STATUS_SUCCESS)
throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status);
NDArray::registerSpecialUse({pC}, {pA, pB});
auto cudaResult = cudaStreamSynchronize(*stream);
if (cudaResult != 0)
throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", cudaResult);
if(C != pC)
C->assign(pC);
for(int i = toDelete.size() - 1; i >= 0; --i)
delete toDelete[i];
}
return C;
}
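//////////////////////////////////////////////////////////////////////////
// Behaviour sketch for mmulMxM (illustrative only; NDArrayFactory is an assumption,
// it is not included here). External code normally reaches this routine through
// MmulHelper::mmul, which dispatches on operand ranks.
/*
    auto a = NDArrayFactory::create<float>('f', {3, 4});  // M=3, K=4; column-contiguous -> transA = false, lda = 3
    auto b = NDArrayFactory::create<float>('c', {4, 5});  // K=4, N=5; row-contiguous    -> transB = true,  ldb = 5
    // neither operand is duplicated: a temporary 'f'-ordered copy is made only when an
    // array is non-contiguous along both of its two axes
    auto c = MmulHelper::mmulMxM(&a, &b, nullptr, 1.0, 0.0, 'f');  // float x float -> cublasSgemm, C is [3,5], ldc = 3
    delete c;
*/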
////////////////////////////////////////////////////////////////////////////
// MxN x N = M
NDArray* MmulHelper::mmulMxV(const NDArray* A, const NDArray* X, sd::NDArray* Y, const double alpha, const double beta, const char outOrder) {
int xLenDim, yLenDim(0);
if(A->rankOf() != 2)
throw std::runtime_error("MmulHelper::mmulMxV cuda: rank of A array is not equal 2 !");
if(!shape::isCommonVector(X->shapeInfo(), xLenDim))
throw std::runtime_error("MmulHelper::mmulMxV cuda: X array must be vector !");
const auto M = A->sizeAt(0);
const auto N = A->sizeAt(1);
if(Y != nullptr && !shape::isCommonVector(Y->shapeInfo(), yLenDim))
throw std::runtime_error("MmulHelper::mmulMxV cuda: Y array must be vector !");
if(X->lengthOf() != N)
throw std::runtime_error("MmulHelper::mmulMxV cuda: X vector has wrong length !");
if(Y != nullptr && Y->lengthOf() != M)
throw std::runtime_error("MmulHelper::mmulMxV cuda: Y array has wrong length !");
if(Y == nullptr)
Y = new NDArray(outOrder, {M}, DataTypeUtils::pickPairwiseResultType(A->dataType(), X->dataType()), A->getContext());
if (Y->isEmpty())
return Y;
const int incx = X->strideAt(xLenDim);
const int incy = Y->strideAt(yLenDim);
const auto aType = A->dataType();
const auto xType = X->dataType();
const auto yType = Y->dataType();
const bool AX(aType == xType), AY(aType == yType), AXY(AX && AY);
const bool typeDouble = AXY && aType == DataType::DOUBLE;
const bool typeFloat = AXY && aType == DataType::FLOAT32;
std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex());
auto handle = reinterpret_cast<cublasHandle_t *>(A->getContext()->getCublasHandle());
auto stream = A->getContext()->getCudaStream();
auto status = cublasSetStream_v2(*handle, *stream);
if (status != CUBLAS_STATUS_SUCCESS)
throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", status);
if(!typeDouble && !typeFloat) {
const int threadsPerBlock = MAX_NUM_THREADS;
const int blocksPerGrid = (M + threadsPerBlock - 1) / threadsPerBlock;
NDArray::prepareSpecialUse({Y}, {A, X});
// BUILD_TRIPLE_SELECTOR(aType, xType, yType, usualGemv, (blocksPerGrid, threadsPerBlock, stream, A->specialBuffer(), A->specialShapeInfo(), X->specialBuffer(), X->specialShapeInfo(), Y->specialBuffer(), Y->special(), incx, incy, 0, alpha, beta), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(xType, usualGemv, (blocksPerGrid, threadsPerBlock, stream, A->specialBuffer(), A->specialShapeInfo(), X->specialBuffer(), X->specialShapeInfo(), Y->specialBuffer(), Y->specialShapeInfo(), incx, incy, 0, alpha, beta), NUMERIC_TYPES)
NDArray::registerSpecialUse({Y}, {A, X});
auto cudaResult = cudaStreamSynchronize(*stream);
if (cudaResult != 0)
throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", cudaResult);
}
else {
NDArray *pA(const_cast<NDArray*>(A));
bool aMcont = M == 1 || A->strideAt(0) == 1;
bool aNcont = N == 1 || A->strideAt(1) == 1;
if(!aMcont && !aNcont) {
pA = new NDArray(A->dup('f'));
aMcont = true;
}
const bool transA = !aMcont;
const int lda = (aMcont && aNcont) ? M : transA ? pA->strideAt(0) : pA->strideAt(1);
const cublasOperation_t transAblas = transA ? CUBLAS_OP_T : CUBLAS_OP_N;
NDArray::prepareSpecialUse({Y}, {pA, X});
// choose appropriate cuda gemm api depending on data types
if(typeDouble) {
status = cublasDgemv(*handle, transAblas, transA ? N : M, transA ? M : N, &alpha, (double*)pA->specialBuffer(), lda, (double*)X->specialBuffer(), incx, &beta, (double*)Y->specialBuffer(), incy);
}
else if(typeFloat) {
float alphaF(alpha), betaF(beta);
status = cublasSgemv(*handle, transAblas, transA ? N : M, transA ? M : N, &alphaF, (float*)pA->specialBuffer(), lda, (float*)X->specialBuffer(), incx, &betaF, (float*)Y->specialBuffer(), incy);
}
if (status != CUBLAS_STATUS_SUCCESS)
throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", status);
auto cudaResult = cudaStreamSynchronize(*stream);
if (cudaResult != 0)
throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", cudaResult);
NDArray::registerSpecialUse({Y}, {pA, X});
if(pA != A)
delete pA;
}
return Y;
}
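////////////////////////////////////////////////////////////////////////////
// Usage sketch for mmulMxV (illustrative only; NDArrayFactory is an assumption, it is
// not included here). Matching float or double operands go through cublasSgemv/cublasDgemv;
// any other type combination falls back to the usualGemv kernel above.
/*
    auto a = NDArrayFactory::create<float>('f', {3, 4});           // M=3, N=4
    auto x = NDArrayFactory::create<float>('c', {4});              // vector of length N
    auto y = MmulHelper::mmulMxV(&a, &x, nullptr, 1.0, 0.0, 'f');  // y is a vector of length M = 3
    delete y;
*/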
////////////////////////////////////////////////////////////////////////////
// (X * Y) = Z[0]
NDArray* MmulHelper::dot(const NDArray* X, const NDArray* Y, sd::NDArray* Z, const double alpha, const double beta) {
int xLenDim(0), yLenDim(0);
if(!shape::isCommonVector(X->shapeInfo(), xLenDim))
throw std::runtime_error("MmulHelper::dot cuda: X array must be vector !");
if(!shape::isCommonVector(Y->shapeInfo(), yLenDim))
throw std::runtime_error("MmulHelper::dot cuda: Y array must be vector !");
if(Z != nullptr && !Z->isScalar())
throw std::runtime_error("MmulHelper::dot cuda: Z array must be scalar !");
const auto length = X->lengthOf();
if(Y->lengthOf() != length)
throw std::runtime_error("MmulHelper::dot cuda: lengths of input vectors are different !");
if(Z == nullptr)
Z = new NDArray(DataTypeUtils::pickPairwiseResultType(X->dataType(), Y->dataType()), X->getContext());
const Nd4jLong incx = X->strideAt(xLenDim);
const Nd4jLong incy = Y->strideAt(yLenDim);
const auto xType = X->dataType();
const auto yType = Y->dataType();
const auto zType = Z->dataType();
if(!X->isActualOnDeviceSide()) X->syncToDevice();
if(!Y->isActualOnDeviceSide()) Y->syncToDevice();
if(!Z->isActualOnDeviceSide()) Z->syncToDevice();
cudaStream_t* stream = X->getContext()->getCudaStream();
dim3 threadsPerBlock(512);
dim3 blocksPerGrid(1);
if (length > 512)
threadsPerBlock.x = math::nd4j_ceil<double, int>(static_cast<double>(length) / 512);
NDArray::prepareSpecialUse({Z}, {X, Y});
//BUILD_TRIPLE_SELECTOR(xType, yType, zType, usualDot, (blocksPerGrid, threadsPerBlock, stream, length, alpha, X->specialBuffer(), incx, Y->specialBuffer(), incy, beta, Z->specialBuffer()), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(xType, usualDot, (blocksPerGrid, threadsPerBlock, stream, length, alpha, X->specialBuffer(), incx, Y->specialBuffer(), incy, beta, Z->specialBuffer()), NUMERIC_TYPES)
auto cudaResult = cudaStreamSynchronize(*stream);
if (cudaResult != 0) throw cuda_exception::build("MmulHelper::dot cuda failed !", cudaResult);
NDArray::registerSpecialUse({Z}, {X, Y});
return Z;
}
//////////////////////////////////////////////////////////////////////////////
// [bS,M,K] x [bS,K,N] = [bS,M,N]
// [bS,M,K] x [K,N] = [bS,M,N]
// [M,K] x [bS,K,N] = [bS,M,N]
// bS could stand for several axes
template <typename T1, typename T2, typename T3>
static __global__ void batchedCudaGemm(const void* vA, const Nd4jLong* aShapeInfo, const void* vB, const Nd4jLong* bShapeInfo, void* vC, const Nd4jLong* cShapeInfo,
const int* aBatchDims, const int* bBatchDims, const int* cBatchDims,
const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis, const int cNaxis,
const double alpha, const double beta) {
const T1* A = reinterpret_cast<const T1*>(vA);
const T2* B = reinterpret_cast<const T2*>(vB);
T3* C = reinterpret_cast< T3*>(vC);
__shared__ bool betaPresent;
__shared__ int aRank, bRank, cRank, K, *coords;
__shared__ Nd4jLong cLen, totalThreads;
__shared__ T3 alphaZ, betaZ;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
coords = reinterpret_cast<int*>(shmem);
cLen = shape::length(cShapeInfo);
K = shape::shapeOf(const_cast<Nd4jLong*>(aShapeInfo))[aKaxis];
totalThreads = gridDim.x * blockDim.x;
aRank = shape::rank(aShapeInfo);
bRank = shape::rank(bShapeInfo);
cRank = shape::rank(cShapeInfo);
betaPresent = beta;
alphaZ = alpha;
betaZ = beta;
}
__syncthreads();
auto aCoords = coords + threadIdx.x * (aRank + bRank + cRank);
auto bCoords = aCoords + aRank;
auto cCoords = bCoords + bRank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < cLen; i += totalThreads) {
// evaluate C coordinates
shape::index2coords(i, cShapeInfo, cCoords);
// calculate index of current batch
Nd4jLong batchInd;
if(cBatchDims != nullptr)
batchInd = shape::coords2index(cShapeInfo, cBatchDims, cRank - 2, cCoords);
// evaluate A coordinates
if(aBatchDims != nullptr)
shape::index2coords(batchInd, aShapeInfo, aBatchDims, aRank - 2, aCoords);
aCoords[aMaxis] = cCoords[cMaxis];
aCoords[aKaxis] = 0;
// evaluate B coordinates
if(bBatchDims != nullptr)
shape::index2coords(batchInd, bShapeInfo, bBatchDims, bRank - 2, bCoords);
bCoords[bKaxis] = 0;
bCoords[bNaxis] = cCoords[cNaxis];
auto aOffset = shape::getOffset(aShapeInfo, aCoords);
auto bOffset = shape::getOffset(bShapeInfo, bCoords);
T3 val = A[aOffset] * B[bOffset]; // first iteration
for (uint j = 1; j < K; ++j) { // rest iterations
aOffset += shape::stride(aShapeInfo)[aKaxis];
bOffset += shape::stride(bShapeInfo)[bKaxis];
val = val + A[aOffset] * B[bOffset];
}
auto cOffset = shape::getOffset(cShapeInfo, cCoords);
if(betaPresent)
C[cOffset] = alphaZ * val + betaZ * C[cOffset];
else
C[cOffset] = alphaZ * val;
}
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
__host__ static void batchedGemm(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, cudaStream_t *stream, const void* vA, const Nd4jLong* aShapeInfo, const void* vB, const Nd4jLong* bShapeInfo, void* vC, const Nd4jLong* cShapeInfo, const int* aBatchDims, const int* bBatchDims, const int* cBatchDims, const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis, const int cNaxis, const double alpha, const double beta) {
batchedCudaGemm<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vA, aShapeInfo, vB, bShapeInfo, vC, cShapeInfo, aBatchDims, bBatchDims, cBatchDims, aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta);
}
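///////////////////////////////////////////////////////////////////
// Worked sizing example for batchedGemm (illustrative only; aBuf/bBuf/cBuf, the shape-info
// pointers and the batch-dim arrays are hypothetical device buffers). For A:[2,3,4] x B:[4,5]
// = C:[2,3,5] the ranks are aRank=3, bRank=2, cRank=3, so each thread needs 3+2+3 = 8 ints of
// coordinate scratch; aBatchDims = {0}, cBatchDims = {0}, and bBatchDims stays nullptr.
/*
    const int threadsPerBlock = 64;                                                  // hypothetical
    const int blocksPerGrid   = (2*3*5 + threadsPerBlock - 1) / threadsPerBlock;     // cLen = 30 -> 1 block
    const int sharedMem       = threadsPerBlock * sizeof(int) * (3 + 2 + 3) + 128;   // 64*4*8 + 128 = 2176 bytes
    batchedGemm<float, float, float>(blocksPerGrid, threadsPerBlock, sharedMem, stream,
                                     aBuf, aShapeInfo, bBuf, bShapeInfo, cBuf, cShapeInfo,
                                     aBatchDims, nullptr, cBatchDims,
                                     1, 2, 0, 1, 1, 2,      // aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis
                                     1.0, 0.0);
*/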
///////////////////////////////////////////////////////////////////
NDArray* MmulHelper::mmulNxN(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta, const char outOrder) {
const int aRank = A->rankOf();
const int bRank = B->rankOf();
// input ranks validation
if(aRank > bRank && bRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !");
else if(bRank > aRank && aRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !");
else if (aRank == bRank ) {
for(int i = 0; i < aRank - 2; ++i)
if(A->sizeAt(i) != B->sizeAt(i))
throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
}
if(A->sizeAt(-1) != B->sizeAt(-2))
throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
// validation of C array
std::vector<Nd4jLong> cExpectedShape = aRank > bRank ? A->getShapeAsVector() : B->getShapeAsVector();
cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2);
cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1);
if(C != nullptr ) {
if(!C->isSameShape(cExpectedShape))
throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix multiplication !");
}
else
C = new NDArray(outOrder, cExpectedShape, DataTypeUtils::pickPairwiseResultType(A->dataType(), B->dataType()), A->getContext());
if (C->isEmpty())
return C;
const int cRank = C->rankOf();
const int aMaxis(aRank-2), aKaxis(aRank-1), bKaxis(bRank-2), bNaxis(bRank-1), cMaxis(cRank-2), cNaxis(cRank-1);
const int threadsPerBlock = MAX_NUM_THREADS / 8;
const int blocksPerGrid = (C->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * (aRank + bRank + cRank) + 128;
PointersManager manager(A->getContext(), "MmulHelper::mmulNxN");
const int *aBatchDims(nullptr), *bBatchDims(nullptr), *cBatchDims(nullptr);
if(aRank > 2)
aBatchDims = reinterpret_cast<int*>(manager.replicatePointer(ShapeUtils::evalDimsToExclude(aRank, {aMaxis, aKaxis}).data(), (aRank - 2) * sizeof(int)));
if(bRank > 2)
bBatchDims = reinterpret_cast<int*>(manager.replicatePointer(ShapeUtils::evalDimsToExclude(bRank, {bKaxis, bNaxis}).data(), (bRank - 2) * sizeof(int)));
if(cRank > 2)
cBatchDims = reinterpret_cast<int*>(manager.replicatePointer(ShapeUtils::evalDimsToExclude(cRank, {cMaxis, cNaxis}).data(), (cRank - 2) * sizeof(int)));
NDArray::prepareSpecialUse({C}, {A, B});
// BUILD_TRIPLE_SELECTOR(A->dataType(), b->dataType(), C->dataType(), batchedGemm, (blocksPerGrid, threadsPerBlock, A->getContext()->getCudaStream(), A->specialBuffer(), A->specialShapeInfo(), B->specialBuffer(), B->specialShapeInfo(), C->specialBuffer(), C->special(), aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(A->dataType(), batchedGemm, (blocksPerGrid, threadsPerBlock, sharedMem, A->getContext()->getCudaStream(), A->specialBuffer(), A->specialShapeInfo(), B->specialBuffer(), B->specialShapeInfo(), C->specialBuffer(), C->specialShapeInfo(), aBatchDims, bBatchDims, cBatchDims, aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta), NUMERIC_TYPES)
NDArray::registerSpecialUse({C}, {A, B});
manager.synchronize();
return C;
}
/*
//////////////////////////////////////////////////////////////////////////////
// MXN x N = M
template <typename T1, typename T2, typename T3>
static __global__ void usualCudaGemv(const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vX, const int incx, const double beta, void* vY, const int incy) {
T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA));
T2* X = reinterpret_cast<T2*>(const_cast<void*>(vX));
T3* Y = reinterpret_cast<T3*>(vY);
__shared__ T3 alphaZ, betaZ;
__shared__ Nd4jLong strideArow, strideAcol;
const int row = blockIdx.x * blockDim.x + threadIdx.x;
if(row == 0) {
alphaZ = alpha;
betaZ = beta;
if(transA) { strideArow = lda; strideAcol = 1; } else { strideArow = 1; strideAcol = lda; }
}
__syncthreads();
T3 val = 0;
if (row < M)
for (int i = 0; i < N; i++)
val = val + A[row * strideArow + i * strideAcol] * X[i * incx];
Y[row * incy] = alphaZ * val + betaZ * Y[row * incy];
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
__host__ static void usualGemv(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vX, const int incx, const double beta, void* vY, const int incy) {
usualCudaGemv<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(transA, M, N, alpha, vA, lda, vX, incx, beta, vY, incy);
}
*/
/*
//////////////////////////////////////////////////////////////////////////////
MXK x KxN = MxN
C array must be in f order
template <typename T1, typename T2, typename T3>
static __global__ void usualCudaGemm(const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc) {
T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA));
T2* B = reinterpret_cast<T2*>(const_cast<void*>(vB));
T3* C = reinterpret_cast<T3*>(vC);
__shared__ T3 alphaZ, betaZ;
__shared__ Nd4jLong strideArow, strideAcol, strideBrow, strideBcol;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row == 0 && col == 0) {
alphaZ = alpha;
betaZ = beta;
if(transA) { strideArow = lda; strideAcol = 1; } else { strideArow = 1; strideAcol = lda; }
if(transB) { strideBrow = ldb; strideBcol = 1; } else { strideBrow = 1; strideBcol = ldb; }
}
__syncthreads();
T3 val = 0;
if (row < M && col < N)
for (int i = 0; i < K; i++)
val = val + A[row * strideArow + i * strideAcol] * B[i * strideBrow + col * strideBcol];
C[row + col * ldc] = alphaZ * val + betaZ * C[row + col * ldc];
}
//////////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
__host__ static void usualGemm(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc) {
usualCudaGemm<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(transA, transB, M, N, K, alpha, vA, lda, vB, ldb, beta, vC, ldc);
}
*/
//////////////////////////////////////////////////////////////////////////
/*
NDArray* MmulHelper::mmulNxNold1(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta, const char outOrder) {
const int aRank = A->rankOf();
const int bRank = B->rankOf();
// input ranks validation
if(aRank > bRank && bRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !");
else if(bRank > aRank && aRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !");
else if (aRank == bRank ) {
for(int i = 0; i < aRank - 2; ++i)
if(A->sizeAt(i) != B->sizeAt(i))
throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
}
if(A->sizeAt(-1) != B->sizeAt(-2))
throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
// validation of C array
std::vector<Nd4jLong> cExpectedShape = aRank > bRank ? A->getShapeAsVector() : B->getShapeAsVector();
cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2);
cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1);
if(C != nullptr ) {
if(!C->isSameShape(cExpectedShape))
throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix multiplication !");
}
else {
C = new NDArray(outOrder, cExpectedShape, B->dataType());
}
// multiplication
const std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(C->rankOf(), {-2, -1});
const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(C->shapeInfo(), dimsToExclude);
std::vector<Nd4jLong> idxRanges(2 * C->rankOf());
// #pragma omp parallel for schedule(guided) firstprivate(idxRanges)
for(Nd4jLong i = 0; i < numOfSubArrs; ++i) {
ShapeUtils::evalIdxRangesForSubArr(i, C->shapeInfo(), dimsToExclude, idxRanges.data());
NDArray cSubArr = (*C)(idxRanges);
if(aRank > bRank) {
NDArray aSubArr = (*A)(idxRanges);
mmulMxM(&aSubArr, B, &cSubArr, 1., 0., outOrder);
}
else if(bRank > aRank) {
NDArray bSubArr = (*B)(idxRanges);
mmulMxM(A, &bSubArr, &cSubArr, 1., 0, outOrder);
}
else {
NDArray aSubArr = (*A)(idxRanges);
NDArray bSubArr = (*B)(idxRanges);
mmulMxM(&aSubArr, &bSubArr, &cSubArr, 1., 0., outOrder);
}
}
return C;
}
*/
//////////////////////////////////////////////////////////////////////////
// [bS,M,K] x [bS,K,N] = [bS,M,N]
// [bS,M,K] x [K,N] = [bS,M,N]
// [M,K] x [bS,K,N] = [bS,M,N]
// bS could stand for several axes
/*
NDArray* MmulHelper::mmulNxNold2(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta, const char outOrder) {
const int aRank = A->rankOf();
const int bRank = B->rankOf();
// input ranks validation
if(aRank > bRank && bRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !");
else if(bRank > aRank && aRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !");
else if (aRank == bRank ) {
for(int i = 0; i < aRank - 2; ++i)
if(A->sizeAt(i) != B->sizeAt(i))
throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
}
if(A->sizeAt(-1) != B->sizeAt(-2))
throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
// validation of C array
std::vector<Nd4jLong> cExpectedShape = aRank > bRank ? A->getShapeAsVector() : B->getShapeAsVector();
cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2);
cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1);
if(C != nullptr ) {
if(!C->isSameShape(cExpectedShape))
throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix multiplication !");
}
else
C = new NDArray(outOrder, cExpectedShape, B->dataType());
const int cRank = C->rankOf();
const auto M = A->sizeAt(-2);
const auto K = A->sizeAt(-1);
const auto N = B->sizeAt(-1);
NDArray *pA(const_cast<NDArray*>(A)), *pB(const_cast<NDArray*>(B)), *pC(const_cast<NDArray*>(C));
std::vector<NDArray*> toDelete;
bool aMcont = M == 1 || A->strideAt(-2) == 1;
bool aKcont = K == 1 || A->strideAt(-1) == 1;
bool bKcont = K == 1 || B->strideAt(-2) == 1;
bool bNcont = N == 1 || B->strideAt(-1) == 1;
bool cMcont = M == 1 || C->strideAt(-2) == 1;
bool cNcont = N == 1 || C->strideAt(-1) == 1;
if(!aMcont && !aKcont) {
pA = new NDArray(A->dup('c'));
toDelete.push_back(pA);
aKcont = true;
}
if(!bKcont && !bNcont) {
pB = new NDArray(B->dup('c'));
toDelete.push_back(pB);
bNcont = true;
}
std::vector<int> permut(cRank);
if(!cMcont) {
std::iota(permut.begin(), permut.end(), 0);
permut[cRank - 2] = cRank - 1;
permut[cRank - 1] = cRank - 2; // swap two last dimensions [..., M,N] -> [..., N,M]
auto Cpermut = C->permute(permut);
pC = new NDArray('c', Cpermut.getShapeAsVector(), Cpermut.dataType(), A->getContext());
pC->assign(Cpermut);
toDelete.push_back(pC);
cMcont = true;
}
const auto aType = pA->dataType();
const auto bType = pB->dataType();
const auto cType = pC->dataType();
const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC);
bool badTypes = false;
cudaDataType_t cudaType, cudaAType, cudaBType, cudaCType;
if(ABC && aType == DataType::HALF) {
cudaType = cudaAType = cudaBType = cudaCType = CUDA_R_16F;
}
else if(ABC && aType == DataType::FLOAT32) {
cudaType = cudaAType = cudaBType = cudaCType = CUDA_R_32F;
}
else if(ABC && aType == DataType::DOUBLE) {
cudaType = cudaAType = cudaBType = cudaCType = CUDA_R_64F;
}
else if(AB && cType == DataType::FLOAT32 && aType == DataType::INT8) {
cudaType = cudaCType = CUDA_R_32F;
cudaAType = cudaBType = CUDA_R_8I;
}
else if(AB && cType == DataType::FLOAT32 && aType == DataType::HALF) {
cudaType = cudaCType = CUDA_R_32F;
cudaAType = cudaBType = CUDA_R_16F;
}
else
badTypes = true;
const int bS = pC->lengthOf() / (M*N);
const std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(cRank, {-2, -1});
NDArray::prepareSpecialUse({pC}, {pA, pB});
if(!badTypes) {
std::vector<Nd4jLong> subArrOffsets(bS);
std::vector<Nd4jLong> subArrShapeInfo(shape::shapeInfoLength(2)); // all sub-arrays have rank = 2
std::vector<void*> aSubArrs(bS), bSubArrs(bS), cSubArrs(bS);
if(aRank > 2)
shape::calcSubArrsShapeInfoAndOffsets(pA->shapeInfo(), bS, dimsToExclude.size(), dimsToExclude.data(), subArrShapeInfo.data(), subArrOffsets.data());
for (int i = 0; i < bS; ++i)
aSubArrs[i] = aRank == 2 ? pA->specialBuffer() : pA->specialBuffer() + subArrOffsets[i] * pA->sizeOfT();
if(bRank > 2)
shape::calcSubArrsShapeInfoAndOffsets(pB->shapeInfo(), bS, dimsToExclude.size(), dimsToExclude.data(), subArrShapeInfo.data(), subArrOffsets.data());
for (int i = 0; i < bS; ++i)
bSubArrs[i] = bRank == 2 ? pB->specialBuffer() : pB->specialBuffer() + subArrOffsets[i] * pB->sizeOfT();
shape::calcSubArrsShapeInfoAndOffsets(pC->shapeInfo(), bS, dimsToExclude.size(), dimsToExclude.data(), subArrShapeInfo.data(), subArrOffsets.data());
for (int i = 0; i < bS; ++i)
cSubArrs[i] = pC->specialBuffer() + subArrOffsets[i] * pC->sizeOfT();
PointersManager manager(A->getContext(), "mmulNxN");
const void** aSubArrsCuda = reinterpret_cast<const void **>(manager.replicatePointer(aSubArrs.data(), aSubArrs.size() * sizeof(void*)));
const void** bSubArrsCuda = reinterpret_cast<const void **>(manager.replicatePointer(bSubArrs.data(), bSubArrs.size() * sizeof(void*)));
void** cSubArrsCuda = reinterpret_cast< void **>(manager.replicatePointer(cSubArrs.data(), cSubArrs.size() * sizeof(void*)));
const bool transA = !aMcont;
const bool transB = !bKcont;
const int lda = (aMcont && aKcont) ? M : transA ? pA->strideAt(-2) : pA->strideAt(-1);
const int ldb = (bKcont && bNcont) ? K : transB ? pB->strideAt(-2) : pB->strideAt(-1);
const int ldc = (cMcont && cNcont) ? M : C != pC ? pC->strideAt(-2) : pC->strideAt(-1);
const cublasOperation_t transAblas = transA ? CUBLAS_OP_T : CUBLAS_OP_N;
const cublasOperation_t transBblas = transB ? CUBLAS_OP_T : CUBLAS_OP_N;
union Coeff {__half _h; float _f; double _d; };
Coeff uAlpha, uBeta;
if(cudaType == CUDA_R_16F) {
uAlpha._h = alpha;
uBeta._h = beta;
}
else if(cudaType == CUDA_R_32F) {
uAlpha._f = alpha;
uBeta._f = beta;
}
else if(cudaType == CUDA_R_64F) {
uAlpha._d = alpha;
uBeta._d = beta;
}
auto handle = reinterpret_cast<cublasHandle_t *>(A->getContext()->getCublasHandle());
auto stream = A->getContext()->getCudaStream();
auto status = cublasSetStream_v2(*handle, *stream);
if (status != CUBLAS_STATUS_SUCCESS)
throw cuda_exception::build("MmulHelper::mmulNxN cuda failed !", status);
status = cublasGemmBatchedEx(*handle, transAblas, transBblas, M, N, K, &uAlpha, aSubArrsCuda, cudaAType, lda, bSubArrsCuda, cudaBType, ldb, &uBeta, cSubArrsCuda, cudaCType, ldc, bS, cudaType, CUBLAS_GEMM_DEFAULT);
if (status != CUBLAS_STATUS_SUCCESS)
throw cuda_exception::build("MmulHelper::mmulNxN cuda failed !", status);
auto cudaResult = cudaStreamSynchronize(*stream);
if (cudaResult != 0)
throw cuda_exception::build("MmulHelper::mmulNxN cuda failed !", cudaResult);
}
else {
std::vector<Nd4jLong> idxRanges(2 * pC->rankOf());
for(Nd4jLong i = 0; i < bS; ++i) {
ShapeUtils::evalIdxRangesForSubArr(i, pC->shapeInfo(), dimsToExclude, idxRanges.data());
NDArray cSubArr = (*pC)(idxRanges);
if(aRank > bRank) {
NDArray aSubArr = (*pA)(idxRanges);
mmulMxM(&aSubArr, pB, &cSubArr, 1., 0., pC->ordering());
}
else if(bRank > aRank) {
NDArray bSubArr = (*pB)(idxRanges);
mmulMxM(pA, &bSubArr, &cSubArr, 1., 0, pC->ordering());
}
else {
NDArray aSubArr = (*pA)(idxRanges);
NDArray bSubArr = (*pB)(idxRanges);
mmulMxM(&aSubArr, &bSubArr, &cSubArr, 1., 0., pC->ordering());
}
}
}
NDArray::registerSpecialUse({pC}, {pA, pB});
if(C != pC)
C->assign(pC->permute(permut));
for(int i = toDelete.size() - 1; i >= 0; --i)
delete toDelete[i];
return C;
}
*/
//BUILD_TRIPLE_TEMPLATE(template void usualGemm, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES);
//BUILD_TRIPLE_TEMPLATE(template void usualGemv, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vB, const int incx, const double beta, void* vC, const int incy), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES);
//BUILD_TRIPLE_TEMPLATE(template void usualDot, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES);
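// Note: the block-commented code above (terminated by the */ a few lines up) appears to be a
// retained reference implementation of batched NxN matrix multiply built on cublasGemmBatchedEx,
// with a per-slice mmulMxM fallback for dtype combinations the batched path does not cover. The
// BUILD_TRIPLE_TEMPLATE lines are likewise disabled explicit instantiations kept for reference.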
} |
21bffb1131abc9329137857864aee25241c12759.hip | // !!! This is a file automatically generated by hipify!!!
/*
* The MIT License (MIT)
*
* Copyright (c) 2015 Hydrodynamix
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "Common.hpp"
#include "SPHKernel.cuh"
#include "KernelHelper.cuh"
#include "CudaHelper.hpp"
#include "Operators.hpp"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <math.h>
namespace Computation {
#if USE_TEXTURE_MEMORY
texture<float4, hipTextureType1D> sortedPositionTex;
texture<float4, hipTextureType1D> sortedVelocityTex;
texture<float, hipTextureType1D> sortedDensityTex;
texture<unsigned int, hipTextureType1D> cellStartTex;
texture<unsigned int, hipTextureType1D> cellEndTex;
#endif
__constant__
Data::PropertyStore devicePropsSPH;
Data::PropertyStore hostPropsSPH;
__global__ void computeDensity(Data::ParticleData particleData, Data::GridData gridData);
__global__ void computeInternalForces(Data::ParticleData particleData, Data::GridData gridData);
__global__ void integrate(Data::ParticleData particleData);
__global__ void boundary(Data::ParticleData particleData);
__global__ void computeExternalForces(Data::ParticleData particleData, float4 source);
// Poly6 Kernels
__device__ float defaultKernel(float dist, const Data::ComputeProperties& computeProps);
__device__ float4 defaultKernelGradient(float dist, const float4& r, const Data::ComputeProperties& computeProps);
__device__ float defaultKernelLaplacian(float dist, const Data::ComputeProperties& computeProps);
// Spiky Kernels
__device__ float pressureKernel(float dist, const Data::ComputeProperties& computeProps);
__device__ float4 pressureKernelGradient(float dist, const float4& r, const Data::ComputeProperties& computeProps);
__device__ float pressureKernelLaplacian(float dist, const Data::ComputeProperties& computeProps);
__device__ float viscosityKernel(float dist, const Data::ComputeProperties& computeProps);
__device__ float4 viscosityKernelGradient(float dist, const float4& r, const Data::ComputeProperties& computeProps);
__device__ float viscosityKernelLaplacian(float dist, const Data::ComputeProperties& computeProps);
void copyPropertiesToSPHKernel(const Data::PropertyStore& properties) {
hostPropsSPH = properties;
COPY_TO_SYMBOL_ASYNC(devicePropsSPH, &hostPropsSPH, sizeof(hostPropsSPH));
}
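// COPY_TO_SYMBOL_ASYNC presumably wraps hipMemcpyToSymbolAsync; keeping hostPropsSPH as a
// file-scope copy (rather than copying from the caller's argument) ensures the host buffer
// outlives the asynchronous transfer into the __constant__ devicePropsSPH symbol.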
void launchSPHKernel(Data::ParticleData* particleData, Data::GridData* gridData) {
int count = hostPropsSPH.generalProperties.numParticles;
#if USE_TEXTURE_MEMORY
int cellCount = hostPropsSPH.computeProperties.numCells;
BIND_TEXTURE(sortedPositionTex, particleData->sortedPosition, count * sizeof(float4));
BIND_TEXTURE(sortedVelocityTex, particleData->sortedVelocity, count * sizeof(float4));
BIND_TEXTURE(sortedDensityTex, particleData->sortedDensity, count * sizeof(float));
BIND_TEXTURE(cellStartTex, gridData->cellStart, cellCount * sizeof(unsigned int));
BIND_TEXTURE(cellEndTex, gridData->cellEnd, cellCount * sizeof(unsigned int));
#endif
callCudaKernel(computeDensity, count, *particleData, *gridData);
callCudaKernel(computeInternalForces, count, *particleData, *gridData);
#if USE_TEXTURE_MEMORY
UNBIND_TEXTURE(sortedPositionTex);
UNBIND_TEXTURE(sortedVelocityTex);
UNBIND_TEXTURE(sortedDensityTex);
UNBIND_TEXTURE(cellStartTex);
UNBIND_TEXTURE(cellEndTex);
#endif
callCudaKernel(integrate, count, *particleData);
callCudaKernel(boundary, count, *particleData);
}
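// Host-side usage sketch (assumption: the uniform-grid build that fills gridData.cellStart/cellEnd
// and the sorted* particle buffers happens elsewhere, e.g. in a separate grid kernel not shown in
// this file; buildUniformGrid below is a hypothetical placeholder for that step):
// copyPropertiesToSPHKernel(props); // once per property change
// buildUniformGrid(particleData, gridData); // hypothetical: hash + sort particles per cell
// launchSPHKernel(&particleData, &gridData); // density -> forces -> integrate -> boundary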
void computeExternalForcesHost(Data::ParticleData* particleData, const float4& source) {
int count = hostPropsSPH.generalProperties.numParticles;
callCudaKernel(computeExternalForces, count, *particleData, source);
}
__global__ void computeDensity(Data::ParticleData particleData, Data::GridData gridData) {
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= devicePropsSPH.generalProperties.numParticles) {
return;
}
Data::ComputeProperties computeProps = devicePropsSPH.computeProperties;
float density = defaultKernel(0, computeProps);
float4 posA = GET_VALUE(particleData, sortedPosition, tid);
float3 worldSizeHalf = make_float3(computeProps.worldSizeHalfX,
computeProps.worldSizeHalfY, computeProps.worldSizeHalfZ);
float3 position3D = make_float3(posA.x, posA.y, posA.z);
int3 gridPos = computeGridPosition(position3D,
worldSizeHalf, computeProps.cellSize);
unsigned int gridSize = devicePropsSPH.generalProperties.gridSize;
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourGridPos = make_int3(gridPos.x + x, gridPos.y + y, gridPos.z + z);
unsigned int gridHash = computeGridHash(neighbourGridPos, gridSize);
unsigned int startIndex = GET_VALUE(gridData, cellStart, gridHash);
if (startIndex == 0xffffffff) {
continue;
}
unsigned int endIndex = GET_VALUE(gridData, cellEnd, gridHash);
for (unsigned int i = startIndex; i < endIndex; i++) {
if (i == tid) {
continue;
}
float4 posB = GET_VALUE(particleData, sortedPosition, i);
float dist2 = lensq3D(posA - posB);
if (dist2 < computeProps.smoothingLength2 && dist2 > 0) {
density += defaultKernel(dist2, computeProps);
}
}
}
}
}
density *= computeProps.mass * computeProps.defaultKernelCoefficient;
unsigned int index = gridData.index[tid];
particleData.density[index] = density;
particleData.sortedDensity[tid] = density;
}
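// The loop above evaluates the standard SPH density estimate rho_i = m * sum_j W_poly6(|r_ij|^2, h),
// including the self-contribution defaultKernel(0, ...). defaultKernel returns the un-normalized
// (h^2 - r^2)^3 term, so the poly6 normalization (classically 315 / (64 * pi * h^9)) is assumed to
// be precomputed into computeProps.defaultKernelCoefficient, which is multiplied in after the sum.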
__global__ void computeInternalForces(Data::ParticleData particleData, Data::GridData gridData) {
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= devicePropsSPH.generalProperties.numParticles) {
return;
}
float4 pressureForce = make_float4(0, 0, 0, 0);
float4 viscosityForce = make_float4(0, 0, 0, 0);
float4 surfaceNormal = make_float4(0, 0, 0, 0);
float colorField = 0.0f;
float4 posA = GET_VALUE(particleData, sortedPosition, tid);
float4 velA = GET_VALUE(particleData, sortedVelocity, tid);
float densityA = GET_VALUE(particleData, sortedDensity, tid);
Data::PropertyStore deviceProps = devicePropsSPH;
float gasStiffness = deviceProps.physicalProperties.gasStiffness;
float restDensity = deviceProps.physicalProperties.restDensity;
float particleMass = deviceProps.computeProperties.mass;
float pressureA = gasStiffness * (densityA - restDensity);
float3 worldSizeHalf = make_float3(
deviceProps.computeProperties.worldSizeHalfX,
deviceProps.computeProperties.worldSizeHalfY,
deviceProps.computeProperties.worldSizeHalfZ);
float3 position3D = make_float3(posA.x, posA.y, posA.z);
int3 gridPos = computeGridPosition(position3D,
worldSizeHalf, deviceProps.computeProperties.cellSize);
unsigned int gridSize = deviceProps.generalProperties.gridSize;
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourGridPos = make_int3(gridPos.x + x, gridPos.y + y, gridPos.z + z);
unsigned int gridHash = computeGridHash(neighbourGridPos, gridSize);
unsigned int startIndex = GET_VALUE(gridData, cellStart, gridHash);
if (startIndex == 0xffffffff) {
continue;
}
unsigned int endIndex = GET_VALUE(gridData, cellEnd, gridHash);
for (unsigned int i = startIndex; i < endIndex; i++) {
if (i == tid) {
continue;
}
float4 posB = GET_VALUE(particleData, sortedPosition, i);
float4 velB = GET_VALUE(particleData, sortedVelocity, i);
float densityB = GET_VALUE(particleData, sortedDensity, i);
float pressureB = gasStiffness * (densityB - restDensity);
float4 deltaPos = posA - posB;
float dist2 = lensq3D(deltaPos);
if (dist2 >= deviceProps.computeProperties.smoothingLength2 || dist2 <= 0) {
continue;
}
float dist = sqrtf(dist2);
pressureForce += ((pressureA + pressureB) / (2.0f * densityB))
* pressureKernelGradient(dist, deltaPos, deviceProps.computeProperties);
viscosityForce += ((velB - velA) / densityB) * viscosityKernelLaplacian(dist, deviceProps.computeProperties);
surfaceNormal += defaultKernelGradient(dist2, deltaPos, deviceProps.computeProperties) / densityB;
colorField += defaultKernelLaplacian(dist2, deviceProps.computeProperties) / densityB;
}
}
}
}
pressureForce *= -particleMass * deviceProps.computeProperties.pressureKernelGradientCoefficient;
viscosityForce *= particleMass * deviceProps.physicalProperties.viscosity
* deviceProps.computeProperties.viscosityKernelLaplacianCoefficient;
surfaceNormal *= particleMass * deviceProps.computeProperties.defaultKernelGradientCoefficient;
colorField *= particleMass * deviceProps.computeProperties.defaultKernelLaplacianCoefficient;
unsigned int index = gridData.index[tid];
particleData.force[index] = pressureForce + viscosityForce;
particleData.surfaceNormal[index] = surfaceNormal;
particleData.colorField[index] = colorField;
}
__global__ void integrate(Data::ParticleData particleData) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= devicePropsSPH.generalProperties.numParticles) {
return;
}
float4 pos = particleData.position[tid];
float4 vel = particleData.velocity[tid];
float4 force = particleData.force[tid];
float4 surfaceNormal = particleData.surfaceNormal[tid];
float density = particleData.density[tid];
float colorField = particleData.colorField[tid];
float timeStep = devicePropsSPH.physicalProperties.timeStep;
force.y -= devicePropsSPH.physicalProperties.gravity * density;
float surfaceVel = 0.0f;
float surfaceNormalLensq = lensq3D(surfaceNormal);
if (surfaceNormalLensq >= devicePropsSPH.computeProperties.surfaceTensionThreshold2) {
force -= devicePropsSPH.physicalProperties.surfaceTension
* colorField * (surfaceNormal / sqrtf(surfaceNormalLensq));
surfaceVel = lensq3D(vel);
}
// lerp foam color and surface velocity
float foamFactor = timeStep * surfaceVel + (1.0f - timeStep) * pos.w;
vel += force * timeStep / density;
pos += vel * timeStep;
pos.w = fmaxf(foamFactor, 0.1f);
particleData.position[tid] = pos;
particleData.velocity[tid] = vel;
}
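// Integration notes: the surface-tension term follows the color-field formulation
// F = -sigma * laplacian(c_s) * n / |n|, applied only where |n|^2 exceeds the threshold so a
// near-zero normal is never used as a divisor. The update itself is semi-implicit Euler
// (v += F / rho * dt, then x += v * dt), and pos.w is reused as a foam factor blended from the
// surface velocity.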
__global__ void boundary(Data::ParticleData particleData) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= devicePropsSPH.generalProperties.numParticles) {
return;
}
float4 pos = particleData.position[tid];
float4 vel = particleData.velocity[tid];
float worldSizeHalfX = devicePropsSPH.computeProperties.worldSizeHalfX;
float worldSizeHalfY = devicePropsSPH.computeProperties.worldSizeHalfY;
float worldSizeHalfZ = devicePropsSPH.computeProperties.worldSizeHalfZ;
float radius = devicePropsSPH.generalProperties.particleRadius;
float elasticity = devicePropsSPH.physicalProperties.elasticity;
if (pos.x + radius > worldSizeHalfX) {
pos.x = worldSizeHalfX - radius;
vel.x *= -elasticity;
} else if (pos.x - radius < -worldSizeHalfX) {
pos.x = -worldSizeHalfX + radius;
vel.x *= -elasticity;
}
if (pos.y - radius < -worldSizeHalfY) {
pos.y = -worldSizeHalfY + radius;
vel.y *= -elasticity;
}
if (pos.z + radius > worldSizeHalfZ) {
pos.z = worldSizeHalfZ - radius;
vel.z *= -elasticity;
} else if (pos.z - radius < -worldSizeHalfZ) {
pos.z = -worldSizeHalfZ + radius;
vel.z *= -elasticity;
}
particleData.position[tid] = pos;
particleData.velocity[tid] = vel;
}
__global__ void computeExternalForces(Data::ParticleData particleData, float4 source) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= devicePropsSPH.generalProperties.numParticles) {
return;
}
float4 pos = particleData.position[tid];
float4 vel = particleData.velocity[tid];
float4 delta = source - pos;
float dist = fmaxf(length3D(delta), 1.0f);
vel += delta / dist * devicePropsSPH.physicalProperties.mouseForce * devicePropsSPH.physicalProperties.timeStep;
particleData.velocity[tid] = vel;
}
__device__ float defaultKernel(float dist2, const Data::ComputeProperties& computeProps) {
return pow(computeProps.smoothingLength2 - dist2, 3);
}
__device__ float4 defaultKernelGradient(float dist2, const float4& r, const Data::ComputeProperties& computeProps) {
return r * pow(computeProps.smoothingLength2 - dist2, 2);
}
__device__ float defaultKernelLaplacian(float dist2, const Data::ComputeProperties& computeProps) {
return (computeProps.smoothingLength2 - dist2) * (3 * computeProps.smoothingLength2 - 7 * dist2);
}
__device__ float pressureKernel(float dist, const Data::ComputeProperties& computeProps) {
return pow(computeProps.smoothingLength - dist, 3);
}
__device__ float4 pressureKernelGradient(float dist, const float4& r, const Data::ComputeProperties& computeProps) {
return pow(computeProps.smoothingLength - dist, 2) * (r / dist);
}
__device__ float pressureKernelLaplacian(float dist, const Data::ComputeProperties& computeProps) {
return (computeProps.smoothingLength - dist) * (computeProps.smoothingLength - 2 * dist) * (1 / dist);
}
__device__ float viscosityKernel(float dist, const Data::ComputeProperties& computeProps) {
return ((-dist * dist * dist) / (2 * computeProps.smoothingLength3)) + ((dist * dist)
/ computeProps.smoothingLength2) + (computeProps.smoothingLength / (2 * dist)) - 1;
}
__device__ float4 viscosityKernelGradient(float dist, const float4& r, const Data::ComputeProperties& computeProps) {
return r * (-(3 * dist) / (2 * computeProps.smoothingLength3) + (2 / computeProps.smoothingLength2)
- (computeProps.smoothingLength / (2 * dist * dist * dist)));
}
__device__ float viscosityKernelLaplacian(float dist, const Data::ComputeProperties& computeProps) {
return computeProps.smoothingLength - dist;
}
}
| 21bffb1131abc9329137857864aee25241c12759.cu | /*
* The MIT License (MIT)
*
* Copyright (c) 2015 Hydrodynamix
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "Common.hpp"
#include "SPHKernel.cuh"
#include "KernelHelper.cuh"
#include "CudaHelper.hpp"
#include "Operators.hpp"
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <math.h>
namespace Computation {
#if USE_TEXTURE_MEMORY
texture<float4, cudaTextureType1D> sortedPositionTex;
texture<float4, cudaTextureType1D> sortedVelocityTex;
texture<float, cudaTextureType1D> sortedDensityTex;
texture<unsigned int, cudaTextureType1D> cellStartTex;
texture<unsigned int, cudaTextureType1D> cellEndTex;
#endif
__constant__
Data::PropertyStore devicePropsSPH;
Data::PropertyStore hostPropsSPH;
__global__ void computeDensity(Data::ParticleData particleData, Data::GridData gridData);
__global__ void computeInternalForces(Data::ParticleData particleData, Data::GridData gridData);
__global__ void integrate(Data::ParticleData particleData);
__global__ void boundary(Data::ParticleData particleData);
__global__ void computeExternalForces(Data::ParticleData particleData, float4 source);
// Poly6 Kernels
__device__ float defaultKernel(float dist, const Data::ComputeProperties& computeProps);
__device__ float4 defaultKernelGradient(float dist, const float4& r, const Data::ComputeProperties& computeProps);
__device__ float defaultKernelLaplacian(float dist, const Data::ComputeProperties& computeProps);
// Spiky Kernels
__device__ float pressureKernel(float dist, const Data::ComputeProperties& computeProps);
__device__ float4 pressureKernelGradient(float dist, const float4& r, const Data::ComputeProperties& computeProps);
__device__ float pressureKernelLaplacian(float dist, const Data::ComputeProperties& computeProps);
__device__ float viscosityKernel(float dist, const Data::ComputeProperties& computeProps);
__device__ float4 viscosityKernelGradient(float dist, const float4& r, const Data::ComputeProperties& computeProps);
__device__ float viscosityKernelLaplacian(float dist, const Data::ComputeProperties& computeProps);
void copyPropertiesToSPHKernel(const Data::PropertyStore& properties) {
hostPropsSPH = properties;
COPY_TO_SYMBOL_ASYNC(devicePropsSPH, &hostPropsSPH, sizeof(hostPropsSPH));
}
void launchSPHKernel(Data::ParticleData* particleData, Data::GridData* gridData) {
int count = hostPropsSPH.generalProperties.numParticles;
#if USE_TEXTURE_MEMORY
int cellCount = hostPropsSPH.computeProperties.numCells;
BIND_TEXTURE(sortedPositionTex, particleData->sortedPosition, count * sizeof(float4));
BIND_TEXTURE(sortedVelocityTex, particleData->sortedVelocity, count * sizeof(float4));
BIND_TEXTURE(sortedDensityTex, particleData->sortedDensity, count * sizeof(float));
BIND_TEXTURE(cellStartTex, gridData->cellStart, cellCount * sizeof(unsigned int));
BIND_TEXTURE(cellEndTex, gridData->cellEnd, cellCount * sizeof(unsigned int));
#endif
callCudaKernel(computeDensity, count, *particleData, *gridData);
callCudaKernel(computeInternalForces, count, *particleData, *gridData);
#if USE_TEXTURE_MEMORY
UNBIND_TEXTURE(sortedPositionTex);
UNBIND_TEXTURE(sortedVelocityTex);
UNBIND_TEXTURE(sortedDensityTex);
UNBIND_TEXTURE(cellStartTex);
UNBIND_TEXTURE(cellEndTex);
#endif
callCudaKernel(integrate, count, *particleData);
callCudaKernel(boundary, count, *particleData);
}
void computeExternalForcesHost(Data::ParticleData* particleData, const float4& source) {
int count = hostPropsSPH.generalProperties.numParticles;
callCudaKernel(computeExternalForces, count, *particleData, source);
}
__global__ void computeDensity(Data::ParticleData particleData, Data::GridData gridData) {
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= devicePropsSPH.generalProperties.numParticles) {
return;
}
Data::ComputeProperties computeProps = devicePropsSPH.computeProperties;
float density = defaultKernel(0, computeProps);
float4 posA = GET_VALUE(particleData, sortedPosition, tid);
float3 worldSizeHalf = make_float3(computeProps.worldSizeHalfX,
computeProps.worldSizeHalfY, computeProps.worldSizeHalfZ);
float3 position3D = make_float3(posA.x, posA.y, posA.z);
int3 gridPos = computeGridPosition(position3D,
worldSizeHalf, computeProps.cellSize);
unsigned int gridSize = devicePropsSPH.generalProperties.gridSize;
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourGridPos = make_int3(gridPos.x + x, gridPos.y + y, gridPos.z + z);
unsigned int gridHash = computeGridHash(neighbourGridPos, gridSize);
unsigned int startIndex = GET_VALUE(gridData, cellStart, gridHash);
if (startIndex == 0xffffffff) {
continue;
}
unsigned int endIndex = GET_VALUE(gridData, cellEnd, gridHash);
for (unsigned int i = startIndex; i < endIndex; i++) {
if (i == tid) {
continue;
}
float4 posB = GET_VALUE(particleData, sortedPosition, i);
float dist2 = lensq3D(posA - posB);
if (dist2 < computeProps.smoothingLength2 && dist2 > 0) {
density += defaultKernel(dist2, computeProps);
}
}
}
}
}
density *= computeProps.mass * computeProps.defaultKernelCoefficient;
unsigned int index = gridData.index[tid];
particleData.density[index] = density;
particleData.sortedDensity[tid] = density;
}
__global__ void computeInternalForces(Data::ParticleData particleData, Data::GridData gridData) {
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= devicePropsSPH.generalProperties.numParticles) {
return;
}
float4 pressureForce = make_float4(0, 0, 0, 0);
float4 viscosityForce = make_float4(0, 0, 0, 0);
float4 surfaceNormal = make_float4(0, 0, 0, 0);
float colorField = 0.0f;
float4 posA = GET_VALUE(particleData, sortedPosition, tid);
float4 velA = GET_VALUE(particleData, sortedVelocity, tid);
float densityA = GET_VALUE(particleData, sortedDensity, tid);
Data::PropertyStore deviceProps = devicePropsSPH;
float gasStiffness = deviceProps.physicalProperties.gasStiffness;
float restDensity = deviceProps.physicalProperties.restDensity;
float particleMass = deviceProps.computeProperties.mass;
float pressureA = gasStiffness * (densityA - restDensity);
float3 worldSizeHalf = make_float3(
deviceProps.computeProperties.worldSizeHalfX,
deviceProps.computeProperties.worldSizeHalfY,
deviceProps.computeProperties.worldSizeHalfZ);
float3 position3D = make_float3(posA.x, posA.y, posA.z);
int3 gridPos = computeGridPosition(position3D,
worldSizeHalf, deviceProps.computeProperties.cellSize);
unsigned int gridSize = deviceProps.generalProperties.gridSize;
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourGridPos = make_int3(gridPos.x + x, gridPos.y + y, gridPos.z + z);
unsigned int gridHash = computeGridHash(neighbourGridPos, gridSize);
unsigned int startIndex = GET_VALUE(gridData, cellStart, gridHash);
if (startIndex == 0xffffffff) {
continue;
}
unsigned int endIndex = GET_VALUE(gridData, cellEnd, gridHash);
for (unsigned int i = startIndex; i < endIndex; i++) {
if (i == tid) {
continue;
}
float4 posB = GET_VALUE(particleData, sortedPosition, i);
float4 velB = GET_VALUE(particleData, sortedVelocity, i);
float densityB = GET_VALUE(particleData, sortedDensity, i);
float pressureB = gasStiffness * (densityB - restDensity);
float4 deltaPos = posA - posB;
float dist2 = lensq3D(deltaPos);
if (dist2 >= deviceProps.computeProperties.smoothingLength2 || dist2 <= 0) {
continue;
}
float dist = sqrtf(dist2);
pressureForce += ((pressureA + pressureB) / (2.0f * densityB))
* pressureKernelGradient(dist, deltaPos, deviceProps.computeProperties);
viscosityForce += ((velB - velA) / densityB) * viscosityKernelLaplacian(dist, deviceProps.computeProperties);
surfaceNormal += defaultKernelGradient(dist2, deltaPos, deviceProps.computeProperties) / densityB;
colorField += defaultKernelLaplacian(dist2, deviceProps.computeProperties) / densityB;
}
}
}
}
pressureForce *= -particleMass * deviceProps.computeProperties.pressureKernelGradientCoefficient;
viscosityForce *= particleMass * deviceProps.physicalProperties.viscosity
* deviceProps.computeProperties.viscosityKernelLaplacianCoefficient;
surfaceNormal *= particleMass * deviceProps.computeProperties.defaultKernelGradientCoefficient;
colorField *= particleMass * deviceProps.computeProperties.defaultKernelLaplacianCoefficient;
unsigned int index = gridData.index[tid];
particleData.force[index] = pressureForce + viscosityForce;
particleData.surfaceNormal[index] = surfaceNormal;
particleData.colorField[index] = colorField;
}
__global__ void integrate(Data::ParticleData particleData) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= devicePropsSPH.generalProperties.numParticles) {
return;
}
float4 pos = particleData.position[tid];
float4 vel = particleData.velocity[tid];
float4 force = particleData.force[tid];
float4 surfaceNormal = particleData.surfaceNormal[tid];
float density = particleData.density[tid];
float colorField = particleData.colorField[tid];
float timeStep = devicePropsSPH.physicalProperties.timeStep;
force.y -= devicePropsSPH.physicalProperties.gravity * density;
float surfaceVel = 0.0f;
float surfaceNormalLensq = lensq3D(surfaceNormal);
if (surfaceNormalLensq >= devicePropsSPH.computeProperties.surfaceTensionThreshold2) {
force -= devicePropsSPH.physicalProperties.surfaceTension
* colorField * (surfaceNormal / sqrtf(surfaceNormalLensq));
surfaceVel = lensq3D(vel);
}
// lerp foam color and surface velocity
float foamFactor = timeStep * surfaceVel + (1.0f - timeStep) * pos.w;
vel += force * timeStep / density;
pos += vel * timeStep;
pos.w = fmaxf(foamFactor, 0.1f);
particleData.position[tid] = pos;
particleData.velocity[tid] = vel;
}
__global__ void boundary(Data::ParticleData particleData) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= devicePropsSPH.generalProperties.numParticles) {
return;
}
float4 pos = particleData.position[tid];
float4 vel = particleData.velocity[tid];
float worldSizeHalfX = devicePropsSPH.computeProperties.worldSizeHalfX;
float worldSizeHalfY = devicePropsSPH.computeProperties.worldSizeHalfY;
float worldSizeHalfZ = devicePropsSPH.computeProperties.worldSizeHalfZ;
float radius = devicePropsSPH.generalProperties.particleRadius;
float elasticity = devicePropsSPH.physicalProperties.elasticity;
if (pos.x + radius > worldSizeHalfX) {
pos.x = worldSizeHalfX - radius;
vel.x *= -elasticity;
} else if (pos.x - radius < -worldSizeHalfX) {
pos.x = -worldSizeHalfX + radius;
vel.x *= -elasticity;
}
if (pos.y - radius < -worldSizeHalfY) {
pos.y = -worldSizeHalfY + radius;
vel.y *= -elasticity;
}
if (pos.z + radius > worldSizeHalfZ) {
pos.z = worldSizeHalfZ - radius;
vel.z *= -elasticity;
} else if (pos.z - radius < -worldSizeHalfZ) {
pos.z = -worldSizeHalfZ + radius;
vel.z *= -elasticity;
}
particleData.position[tid] = pos;
particleData.velocity[tid] = vel;
}
__global__ void computeExternalForces(Data::ParticleData particleData, float4 source) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= devicePropsSPH.generalProperties.numParticles) {
return;
}
float4 pos = particleData.position[tid];
float4 vel = particleData.velocity[tid];
float4 delta = source - pos;
float dist = fmaxf(length3D(delta), 1.0f);
vel += delta / dist * devicePropsSPH.physicalProperties.mouseForce * devicePropsSPH.physicalProperties.timeStep;
particleData.velocity[tid] = vel;
}
__device__ float defaultKernel(float dist2, const Data::ComputeProperties& computeProps) {
return pow(computeProps.smoothingLength2 - dist2, 3);
}
__device__ float4 defaultKernelGradient(float dist2, const float4& r, const Data::ComputeProperties& computeProps) {
return r * pow(computeProps.smoothingLength2 - dist2, 2);
}
__device__ float defaultKernelLaplacian(float dist2, const Data::ComputeProperties& computeProps) {
return (computeProps.smoothingLength2 - dist2) * (3 * computeProps.smoothingLength2 - 7 * dist2);
}
__device__ float pressureKernel(float dist, const Data::ComputeProperties& computeProps) {
return pow(computeProps.smoothingLength - dist, 3);
}
__device__ float4 pressureKernelGradient(float dist, const float4& r, const Data::ComputeProperties& computeProps) {
return pow(computeProps.smoothingLength - dist, 2) * (r / dist);
}
__device__ float pressureKernelLaplacian(float dist, const Data::ComputeProperties& computeProps) {
return (computeProps.smoothingLength - dist) * (computeProps.smoothingLength - 2 * dist) * (1 / dist);
}
__device__ float viscosityKernel(float dist, const Data::ComputeProperties& computeProps) {
return ((-dist * dist * dist) / (2 * computeProps.smoothingLength3)) + ((dist * dist)
/ computeProps.smoothingLength2) + (computeProps.smoothingLength / (2 * dist)) - 1;
}
__device__ float4 viscosityKernelGradient(float dist, const float4& r, const Data::ComputeProperties& computeProps) {
return r * (-(3 * dist) / (2 * computeProps.smoothingLength3) + (2 / computeProps.smoothingLength2)
- (computeProps.smoothingLength / (2 * dist * dist * dist)));
}
__device__ float viscosityKernelLaplacian(float dist, const Data::ComputeProperties& computeProps) {
return computeProps.smoothingLength - dist;
}
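// The kernel helpers above return un-normalized terms; the usual Mueller-style normalization
// constants (e.g. -45/(pi*h^6) for the spiky gradient and 45/(pi*h^6) for the viscosity Laplacian)
// are assumed to be folded into the *Coefficient fields of ComputeProperties that
// computeInternalForces multiplies in after the neighbor sums.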
}
|
5a9217c619d6835a07cf8da97422b68e7f47a1af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2017 by Contributors
* Copyright (c) 2017 Microsoft
* Licensed under The Apache-2.0 License [see LICENSE for details]
* \file psroi_pooling.cu
* \brief psroi pooling operator
* \author Yi Li, Tairui Chen, Guodong Zhang, Jifeng Dai
*/
#include "./psroi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
#include "../../common/cuda_utils.h"
#include "../mxnet_op.h"
#define PSROIPOOLING_CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
namespace mshadow {
namespace cuda {
template <typename DType>
__global__ void PSROIPoolForwardKernel(
const int count,
const DType* bottom_data,
const DType spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const DType* bottom_rois,
const int output_dim,
const int group_size,
DType* top_data,
DType* mapping_channel) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
DType roi_start_w = static_cast<DType>(round(bottom_rois[1])) * spatial_scale;
DType roi_start_h = static_cast<DType>(round(bottom_rois[2])) * spatial_scale;
DType roi_end_w = static_cast<DType>(round(bottom_rois[3]) + 1.) * spatial_scale;
DType roi_end_h = static_cast<DType>(round(bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
DType roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0
DType roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
int hstart = floor(static_cast<DType>(ph) * bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<DType>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0),width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int gw = floor(static_cast<DType>(pw)* group_size / pooled_width);
int gh = floor(static_cast<DType>(ph)* group_size / pooled_height);
gw = min(max(gw, 0), group_size - 1);
gh = min(max(gh, 0), group_size - 1);
int c = (ctop*group_size + gh)*group_size + gw;
bottom_data += (roi_batch_ind * channels + c) * height * width;
DType out_sum = 0;
for (int h = hstart; h < hend; ++h){
for (int w = wstart; w < wend; ++w){
int bottom_index = h*width + w;
out_sum += bottom_data[bottom_index];
}
}
DType bin_area = (hend - hstart)*(wend - wstart);
top_data[index] = is_empty? (DType)0. : out_sum/bin_area;
mapping_channel[index] = c;
}
}
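// Position-sensitive pooling: output bin (ph, pw) of output channel ctop reads only input channel
// c = (ctop * group_size + gh) * group_size + gw, i.e. each spatial cell of the ROI looks at its
// own dedicated score map, and the bin value is the average over the bin (0 for empty bins).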
template<typename DType>
inline void PSROIPoolForward(const Tensor<gpu, 4, DType> &out,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &mapping_channel,
const float spatial_scale,
const int output_dim_,
const int group_size_) {
// LOG(INFO) << "PSROIPoolForward";
const DType *bottom_data = data.dptr_;
const DType *bottom_rois = bbox.dptr_;
DType *top_data = out.dptr_;
DType *mapping_channel_ptr = mapping_channel.dptr_;
const int count = out.shape_.Size();
const int channels = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
const int pooled_height = out.size(2);
const int pooled_width = out.size(3);
hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
PSROIPoolForwardKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count),
kBaseThreadNum, 0, stream >> >(
count, bottom_data, spatial_scale, channels, height, width,
pooled_height, pooled_width, bottom_rois, output_dim_, group_size_, top_data, mapping_channel_ptr);
PSROIPOOLING_CUDA_CHECK(hipPeekAtLastError());
}
template <typename DType>
__global__ void PSROIPoolBackwardAccKernel(
const int count,
const DType* top_diff,
const DType* mapping_channel,
const int num_rois,
const DType spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int output_dim,
DType* bottom_diff,
const DType* bottom_rois) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
DType roi_start_w = static_cast<DType>(round(bottom_rois[1])) * spatial_scale;
DType roi_start_h = static_cast<DType>(round(bottom_rois[2])) * spatial_scale;
DType roi_end_w = static_cast<DType>(round(bottom_rois[3]) + 1.) * spatial_scale;
DType roi_end_h = static_cast<DType>(round(bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
DType roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0
DType roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
int hstart = floor(static_cast<DType>(ph)* bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<DType>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int c = mapping_channel[index];
DType* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width;
DType bin_area = (hend - hstart)*(wend - wstart);
DType diff_val = is_empty ? (DType)0. : top_diff[index] / bin_area;
for (int h = hstart; h < hend; ++h){
for (int w = wstart; w < wend; ++w){
int bottom_index = h*width + w;
// mxnet_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
atomicAdd(offset_bottom_diff + bottom_index, diff_val);
}
}
}
}
template<typename DType>
inline void PSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
const Tensor<gpu, 4, DType> &out_grad,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &mapping_channel,
const float spatial_scale,
const int output_dim_) {
// LOG(INFO) << "PSROIPoolBackward";
const DType *top_diff = out_grad.dptr_;
const DType *bottom_rois = bbox.dptr_;
DType *bottom_diff = in_grad.dptr_;
DType *mapping_channel_ptr = mapping_channel.dptr_;
const int count = out_grad.shape_.Size();
const int num_rois = bbox.size(0);
const int channels = in_grad.size(1);
const int height = in_grad.size(2);
const int width = in_grad.size(3);
const int pooled_height = out_grad.size(2);
const int pooled_width = out_grad.size(3);
hipStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
PSROIPoolBackwardAccKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count),
kBaseThreadNum, 0, stream >> >(
count, top_diff, mapping_channel_ptr, num_rois, spatial_scale, channels, height, width,
pooled_height, pooled_width, output_dim_, bottom_diff, bottom_rois);
PSROIPOOLING_CUDA_CHECK(hipPeekAtLastError());
}
} // namespace cuda
template<typename DType>
inline void PSROIPoolForward(const Tensor<gpu, 4, DType> &out,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &mapping_channel,
const float spatial_scale,
const int output_dim_,
const int group_size_) {
cuda::PSROIPoolForward(out, data, bbox, mapping_channel, spatial_scale, output_dim_, group_size_);
}
template<typename DType>
inline void PSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
const Tensor<gpu, 4, DType> &out_grad,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &mapping_channel,
const float spatial_scale,
const int output_dim_) {
cuda::PSROIPoolBackwardAcc(in_grad, out_grad, bbox, mapping_channel, spatial_scale, output_dim_);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(PSROIPoolingParam param, int dtype) {
Operator* op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new PSROIPoolingOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet | 5a9217c619d6835a07cf8da97422b68e7f47a1af.cu | /*!
* Copyright (c) 2017 by Contributors
* Copyright (c) 2017 Microsoft
* Licensed under The Apache-2.0 License [see LICENSE for details]
* \file psroi_pooling.cu
* \brief psroi pooling operator
* \author Yi Li, Tairui Chen, Guodong Zhang, Jifeng Dai
*/
#include "./psroi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
#include "../../common/cuda_utils.h"
#include "../mxnet_op.h"
#define PSROIPOOLING_CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
namespace mshadow {
namespace cuda {
template <typename DType>
__global__ void PSROIPoolForwardKernel(
const int count,
const DType* bottom_data,
const DType spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const DType* bottom_rois,
const int output_dim,
const int group_size,
DType* top_data,
DType* mapping_channel) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
DType roi_start_w = static_cast<DType>(round(bottom_rois[1])) * spatial_scale;
DType roi_start_h = static_cast<DType>(round(bottom_rois[2])) * spatial_scale;
DType roi_end_w = static_cast<DType>(round(bottom_rois[3]) + 1.) * spatial_scale;
DType roi_end_h = static_cast<DType>(round(bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
DType roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0
DType roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
int hstart = floor(static_cast<DType>(ph) * bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<DType>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0),width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int gw = floor(static_cast<DType>(pw)* group_size / pooled_width);
int gh = floor(static_cast<DType>(ph)* group_size / pooled_height);
gw = min(max(gw, 0), group_size - 1);
gh = min(max(gh, 0), group_size - 1);
int c = (ctop*group_size + gh)*group_size + gw;
bottom_data += (roi_batch_ind * channels + c) * height * width;
DType out_sum = 0;
for (int h = hstart; h < hend; ++h){
for (int w = wstart; w < wend; ++w){
int bottom_index = h*width + w;
out_sum += bottom_data[bottom_index];
}
}
DType bin_area = (hend - hstart)*(wend - wstart);
top_data[index] = is_empty? (DType)0. : out_sum/bin_area;
mapping_channel[index] = c;
}
}
template<typename DType>
inline void PSROIPoolForward(const Tensor<gpu, 4, DType> &out,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &mapping_channel,
const float spatial_scale,
const int output_dim_,
const int group_size_) {
// LOG(INFO) << "PSROIPoolForward";
const DType *bottom_data = data.dptr_;
const DType *bottom_rois = bbox.dptr_;
DType *top_data = out.dptr_;
DType *mapping_channel_ptr = mapping_channel.dptr_;
const int count = out.shape_.Size();
const int channels = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
const int pooled_height = out.size(2);
const int pooled_width = out.size(3);
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
PSROIPoolForwardKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count),
kBaseThreadNum, 0, stream >> >(
count, bottom_data, spatial_scale, channels, height, width,
pooled_height, pooled_width, bottom_rois, output_dim_, group_size_, top_data, mapping_channel_ptr);
PSROIPOOLING_CUDA_CHECK(cudaPeekAtLastError());
}
template <typename DType>
__global__ void PSROIPoolBackwardAccKernel(
const int count,
const DType* top_diff,
const DType* mapping_channel,
const int num_rois,
const DType spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int output_dim,
DType* bottom_diff,
const DType* bottom_rois) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
DType roi_start_w = static_cast<DType>(round(bottom_rois[1])) * spatial_scale;
DType roi_start_h = static_cast<DType>(round(bottom_rois[2])) * spatial_scale;
DType roi_end_w = static_cast<DType>(round(bottom_rois[3]) + 1.) * spatial_scale;
DType roi_end_h = static_cast<DType>(round(bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
DType roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0
DType roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
int hstart = floor(static_cast<DType>(ph)* bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<DType>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int c = mapping_channel[index];
DType* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width;
DType bin_area = (hend - hstart)*(wend - wstart);
DType diff_val = is_empty ? (DType)0. : top_diff[index] / bin_area;
for (int h = hstart; h < hend; ++h){
for (int w = wstart; w < wend; ++w){
int bottom_index = h*width + w;
// mxnet_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
atomicAdd(offset_bottom_diff + bottom_index, diff_val);
}
}
}
}
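// Backward pass: each output gradient is split evenly over its bin (divided by bin_area) and
// scattered back to the mapped input channel with atomicAdd, since bins from overlapping ROIs
// (and overlapping bins of the same ROI) may touch the same bottom_diff locations.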
template<typename DType>
inline void PSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
const Tensor<gpu, 4, DType> &out_grad,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &mapping_channel,
const float spatial_scale,
const int output_dim_) {
// LOG(INFO) << "PSROIPoolBackward";
const DType *top_diff = out_grad.dptr_;
const DType *bottom_rois = bbox.dptr_;
DType *bottom_diff = in_grad.dptr_;
DType *mapping_channel_ptr = mapping_channel.dptr_;
const int count = out_grad.shape_.Size();
const int num_rois = bbox.size(0);
const int channels = in_grad.size(1);
const int height = in_grad.size(2);
const int width = in_grad.size(3);
const int pooled_height = out_grad.size(2);
const int pooled_width = out_grad.size(3);
cudaStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
PSROIPoolBackwardAccKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count),
kBaseThreadNum, 0, stream >> >(
count, top_diff, mapping_channel_ptr, num_rois, spatial_scale, channels, height, width,
pooled_height, pooled_width, output_dim_, bottom_diff, bottom_rois);
PSROIPOOLING_CUDA_CHECK(cudaPeekAtLastError());
}
} // namespace cuda
template<typename DType>
inline void PSROIPoolForward(const Tensor<gpu, 4, DType> &out,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &mapping_channel,
const float spatial_scale,
const int output_dim_,
const int group_size_) {
cuda::PSROIPoolForward(out, data, bbox, mapping_channel, spatial_scale, output_dim_, group_size_);
}
template<typename DType>
inline void PSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
const Tensor<gpu, 4, DType> &out_grad,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &mapping_channel,
const float spatial_scale,
const int output_dim_) {
cuda::PSROIPoolBackwardAcc(in_grad, out_grad, bbox, mapping_channel, spatial_scale, output_dim_);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(PSROIPoolingParam param, int dtype) {
Operator* op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new PSROIPoolingOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet |
a78699a217bc19d1e8d3fdf26f58e0e772e48377.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
@author:chenzhengqiang
@date:2018-09-25
@email:[email protected]
*/
#include "config.h"
#include "saliency.h"
#include "opencv_common.h"
#include "cuda_common.h"
__global__ void bgr2lab(const cuda::PtrStepSz<uchar> Bimg_, const cuda::PtrStepSz<uchar> Gimg_, const cuda::PtrStepSz<uchar> Rimg_,
cuda::PtrStepSz<double> Limg, cuda::PtrStepSz<double> Aimg, cuda::PtrStepSz<double> Bimg)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if ( x < Rimg_.rows && y < Rimg_.cols )
{
double sB = static_cast<double>(Bimg_(x,y));
double sG = static_cast<double>(Gimg_(x,y));
double sR = static_cast<double>(Rimg_(x,y));
double R = sR/255.0;
double G = sG/255.0;
double B = sB/255.0;
double r, g, b;
if(R <= 0.04045) r = (double)R/12.92;
else r = pow((R+0.055)/1.055,2.4);
if(G <= 0.04045) g = (double)G/12.92;
else g = pow((G+0.055)/1.055,2.4);
if(B <= 0.04045) b = (double)B/12.92;
else b = pow((B+0.055)/1.055,2.4);
double X = r*0.4124564 + g*0.3575761 + b*0.1804375;
double Y = r*0.2126729 + g*0.7151522 + b*0.0721750;
double Z = r*0.0193339 + g*0.1191920 + b*0.9503041;
//------------------------
// XYZ to LAB conversion
//------------------------
double epsilon = 0.008856; // actual CIE standard
double kappa = 903.3; // actual CIE standard
double Xr = 0.950456; // reference white
double Yr = 1.0; // reference white
double Zr = 1.088754; // reference white
double xr = X/Xr;
double yr = Y/Yr;
double zr = Z/Zr;
double fx, fy, fz;
if(xr > epsilon) fx = pow(xr, 1.0/3.0);
else fx = (kappa*xr + 16.0)/116.0;
if(yr > epsilon) fy = pow(yr, 1.0/3.0);
else fy = (kappa*yr + 16.0)/116.0;
if(zr > epsilon) fz = pow(zr, 1.0/3.0);
else fz = (kappa*zr + 16.0)/116.0;
Limg(x,y) = yr > epsilon ? (116.0*fy-16.0):(yr*kappa);
Aimg(x,y)= 500.0*(fx-fy);
Bimg(x,y)= 200.0*(fy-fz);
}
}
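// obtain_saliency_with_ft below follows the frequency-tuned saliency idea: per-pixel saliency is
// the squared Lab-space distance to the mean Lab colour of the frame, min-max normalized to 8 bits.
// (The Gaussian-smoothed image used in the original frequency-tuned formulation is not applied
// here; if it is needed, it is assumed to happen upstream of this function.)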
void obtain_saliency_with_ft( BUFFER::GLOBAL_BUFFER & global_buffer )
{
hipLaunchKernelGGL(( bgr2lab), dim3(global_buffer.BLOCKS), dim3(global_buffer.THREADS), 0, 0, global_buffer.BGR[0], global_buffer.BGR[1], global_buffer.BGR[2],
global_buffer.Limg, global_buffer.Aimg, global_buffer.Bimg);
double mean_lab[3];
mean_lab[0] = (cuda::sum(global_buffer.Limg)).val[0] / (global_buffer.IMAGE_PIXELS);
mean_lab[1] = (cuda::sum(global_buffer.Aimg)).val[0] / (global_buffer.IMAGE_PIXELS);
mean_lab[2] = (cuda::sum(global_buffer.Bimg)).val[0] / (global_buffer.IMAGE_PIXELS);
cuda::Stream stream;
cuda::absdiff(global_buffer.Limg, mean_lab[0], global_buffer.Limg, stream);
cuda::pow(global_buffer.Limg, 2, global_buffer.Limg, stream);
cuda::absdiff(global_buffer.Aimg, mean_lab[1], global_buffer.Aimg, stream);
cuda::pow(global_buffer.Aimg, 2, global_buffer.Aimg, stream);
cuda::absdiff(global_buffer.Bimg, mean_lab[2], global_buffer.Bimg, stream);
cuda::pow(global_buffer.Bimg, 2, global_buffer.Bimg, stream);
cuda::add(global_buffer.Limg, global_buffer.Aimg, global_buffer.norm, global_buffer.tmp, -1, stream);
cuda::add(global_buffer.Bimg, global_buffer.norm, global_buffer.norm, global_buffer.tmp, -1, stream);
cuda::normalize(global_buffer.norm, global_buffer.norm, 1, 0, NORM_MINMAX, -1, global_buffer.tmp, stream);
global_buffer.norm.convertTo(global_buffer.saliency_map, CV_8UC1, 255, 0, stream);
stream.waitForCompletion();
}
| a78699a217bc19d1e8d3fdf26f58e0e772e48377.cu | /*
@author:chenzhengqiang
@date:2018-09-25
@email:[email protected]
*/
#include "config.h"
#include "saliency.h"
#include "opencv_common.h"
#include "cuda_common.h"
__global__ void bgr2lab(const cuda::PtrStepSz<uchar> Bimg_, const cuda::PtrStepSz<uchar> Gimg_, const cuda::PtrStepSz<uchar> Rimg_,
cuda::PtrStepSz<double> Limg, cuda::PtrStepSz<double> Aimg, cuda::PtrStepSz<double> Bimg)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if ( x < Rimg_.rows && y < Rimg_.cols )
{
double sB = static_cast<double>(Bimg_(x,y));
double sG = static_cast<double>(Gimg_(x,y));
double sR = static_cast<double>(Rimg_(x,y));
double R = sR/255.0;
double G = sG/255.0;
double B = sB/255.0;
double r, g, b;
if(R <= 0.04045) r = (double)R/12.92;
else r = pow((R+0.055)/1.055,2.4);
if(G <= 0.04045) g = (double)G/12.92;
else g = pow((G+0.055)/1.055,2.4);
if(B <= 0.04045) b = (double)B/12.92;
else b = pow((B+0.055)/1.055,2.4);
double X = r*0.4124564 + g*0.3575761 + b*0.1804375;
double Y = r*0.2126729 + g*0.7151522 + b*0.0721750;
double Z = r*0.0193339 + g*0.1191920 + b*0.9503041;
//------------------------
// XYZ to LAB conversion
//------------------------
double epsilon = 0.008856; // actual CIE standard
double kappa = 903.3; // actual CIE standard
double Xr = 0.950456; // reference white
double Yr = 1.0; // reference white
double Zr = 1.088754; // reference white
double xr = X/Xr;
double yr = Y/Yr;
double zr = Z/Zr;
double fx, fy, fz;
if(xr > epsilon) fx = pow(xr, 1.0/3.0);
else fx = (kappa*xr + 16.0)/116.0;
if(yr > epsilon) fy = pow(yr, 1.0/3.0);
else fy = (kappa*yr + 16.0)/116.0;
if(zr > epsilon) fz = pow(zr, 1.0/3.0);
else fz = (kappa*zr + 16.0)/116.0;
Limg(x,y) = yr > epsilon ? (116.0*fy-16.0):(yr*kappa);
Aimg(x,y)= 500.0*(fx-fy);
Bimg(x,y)= 200.0*(fy-fz);
}
}
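// Constants used above: 0.04045 / 12.92 / 2.4 are the standard sRGB linearization parameters,
// epsilon and kappa are the CIE L*a*b* thresholds, and (0.950456, 1.0, 1.088754) is the D65
// reference white, giving L in [0, 100] and a/b roughly in [-128, 127].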
void obtain_saliency_with_ft( BUFFER::GLOBAL_BUFFER & global_buffer )
{
bgr2lab<<<global_buffer.BLOCKS, global_buffer.THREADS>>>(global_buffer.BGR[0], global_buffer.BGR[1], global_buffer.BGR[2],
global_buffer.Limg, global_buffer.Aimg, global_buffer.Bimg);
double mean_lab[3];
mean_lab[0] = (cuda::sum(global_buffer.Limg)).val[0] / (global_buffer.IMAGE_PIXELS);
mean_lab[1] = (cuda::sum(global_buffer.Aimg)).val[0] / (global_buffer.IMAGE_PIXELS);
mean_lab[2] = (cuda::sum(global_buffer.Bimg)).val[0] / (global_buffer.IMAGE_PIXELS);
cuda::Stream stream;
cuda::absdiff(global_buffer.Limg, mean_lab[0], global_buffer.Limg, stream);
cuda::pow(global_buffer.Limg, 2, global_buffer.Limg, stream);
cuda::absdiff(global_buffer.Aimg, mean_lab[1], global_buffer.Aimg, stream);
cuda::pow(global_buffer.Aimg, 2, global_buffer.Aimg, stream);
cuda::absdiff(global_buffer.Bimg, mean_lab[2], global_buffer.Bimg, stream);
cuda::pow(global_buffer.Bimg, 2, global_buffer.Bimg, stream);
cuda::add(global_buffer.Limg, global_buffer.Aimg, global_buffer.norm, global_buffer.tmp, -1, stream);
cuda::add(global_buffer.Bimg, global_buffer.norm, global_buffer.norm, global_buffer.tmp, -1, stream);
cuda::normalize(global_buffer.norm, global_buffer.norm, 1, 0, NORM_MINMAX, -1, global_buffer.tmp, stream);
global_buffer.norm.convertTo(global_buffer.saliency_map, CV_8UC1, 255, 0, stream);
stream.waitForCompletion();
}
|
fd3f889c61bd84cc9de7798b84f0ee3e3b9cbb17.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Sudoku -- Puzzle Solver on GPU using CUDA
// ----------------------------------------------------------------
/**
* @file
* sudoku.cu
*
* @brief main sudoku file to init and execute
*/
#pragma once
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
// includes, kernels
#include "kernels_hip.cuh"
// includes, utilities
#include "util/error_utils.cuh"
#include "util/io_utils.cuh"
#include "data.cuh"
void KernelManager(Size n, Puzzle * h_unsolved, bool o_graphics); // forward declaration so main() can call it before the definition below
int main(int argc, char** argv) {
/* Gets arguments from command line and puzzle from a file */
CommandLineArgs * build = new CommandLineArgs;
input(argc, argv, build);
KernelManager((*build).size, &(*build).unsolved, (*build).graphpics);
}
void KernelManager(Size n, Puzzle * h_unsolved, bool o_graphics) {
/* CUDA event setup */
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
/* Memory Allocations */
    size_t memsize = sizeof(Puzzle) * n * n; // declare memsize; use the n passed in -- `build` is local to main() and not visible here
Puzzle * d_unsolved;
ERROR_CHECK( hipMalloc((void**) &d_unsolved, memsize) );
ERROR_CHECK( hipMemcpy(d_unsolved, h_unsolved, memsize,
hipMemcpyHostToDevice) );
float elapsedTime;
hipEventRecord(start, 0);
// TODO: Kernel execution
// TODO: All of them can go one by one,
// TODO: we'll just need to reset event record,
// TODO: for multiple timing/performance measurements.
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
Puzzle * d_solved;
ERROR_CHECK( hipMalloc((void**) &d_solved, memsize) );
/* Destroy CUDA event */
hipEventDestroy(start);
hipEventDestroy(stop);
// TODO: Terminal Output will go here.
/* Free Memory Allocations */
    // h_unsolved points at a member of the CommandLineArgs allocated in main(), so it must not be free()'d here
ERROR_CHECK( hipFree(d_unsolved) );
ERROR_CHECK( hipFree(d_solved) );
}
| fd3f889c61bd84cc9de7798b84f0ee3e3b9cbb17.cu | // ----------------------------------------------------------------
// Sudoku -- Puzzle Solver on GPU using CUDA
// ----------------------------------------------------------------
/**
* @file
* sudoku.cu
*
* @brief main sudoku file to init and execute
*/
#pragma once
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cuda.h>
#include <cuda_runtime.h>
// includes, kernels
#include "kernels.cuh"
// includes, utilities
#include "util/error_utils.cuh"
#include "util/io_utils.cuh"
#include "data.cuh"
void KernelManager(Size n, Puzzle * h_unsolved, bool o_graphics); // forward declaration so main() can call it before the definition below
int main(int argc, char** argv) {
/* Gets arguments from command line and puzzle from a file */
CommandLineArgs * build = new CommandLineArgs;
input(argc, argv, build);
KernelManager((*build).size, &(*build).unsolved, (*build).graphpics);
}
void KernelManager(Size n, Puzzle * h_unsolved, bool o_graphics) {
/* CUDA event setup */
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
/* Memory Allocations */
    size_t memsize = sizeof(Puzzle) * n * n; // declare memsize; use the n passed in -- `build` is local to main() and not visible here
Puzzle * d_unsolved;
ERROR_CHECK( cudaMalloc((void**) &d_unsolved, memsize) );
ERROR_CHECK( cudaMemcpy(d_unsolved, h_unsolved, memsize,
cudaMemcpyHostToDevice) );
float elapsedTime;
cudaEventRecord(start, 0);
// TODO: Kernel execution
// TODO: All of them can go one by one,
// TODO: we'll just need to reset event record,
// TODO: for multiple timing/performance measurements.
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
Puzzle * d_solved;
ERROR_CHECK( cudaMalloc((void**) &d_solved, memsize) );
/* Destroy CUDA event */
cudaEventDestroy(start);
cudaEventDestroy(stop);
// TODO: Terminal Output will go here.
/* Free Memory Allocations */
    // h_unsolved points at a member of the CommandLineArgs allocated in main(), so it must not be free()'d here
ERROR_CHECK( cudaFree(d_unsolved) );
ERROR_CHECK( cudaFree(d_solved) );
}
|
8634012a4d2ccc5d9934613a1cad2f69231f695a.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2018 Ole-Christoffer Granmo
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This code implements the Tsetlin Machine from paper arXiv:1804.01508
https://arxiv.org/abs/1804.01508
*/
#include "TsetlinMachineKernels.cuh"
#include "TsetlinMachine.cuh"
#include "TsetlinMachineConfig.cuh"
#include "GPUConfig.cuh"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
/**********************************/
/***** Constructor/Destructor *****/
/**********************************/
TsetlinMachine::TsetlinMachine()
{
hipMallocManaged(&class_sum, sizeof(int));
hipMallocManaged(&ta_state, CLAUSES*FEATURES*2*sizeof(int));
hipMallocManaged(&clause_output, CLAUSES*sizeof(int));
hipMallocManaged(&clause_feedback, CLAUSES*sizeof(int));
hipMallocManaged(&all_exclude, CLAUSES*sizeof(int));
initialize();
}
TsetlinMachine::~TsetlinMachine()
{
hipFree(class_sum);
hipFree(ta_state);
hipFree(clause_output);
hipFree(clause_feedback);
hipFree(all_exclude);
}
void TsetlinMachine::initialize()
{
// Initialize Tsetlin Automata states
for (int j = 0; j < CLAUSES; j++) {
for (int k = 0; k < FEATURES; k++) {
int id = j*FEATURES*2 + k*2;
if (1.0*rand()/RAND_MAX <= 0.5) {
ta_state[id] = NUMBER_OF_STATES+1;
} else {
ta_state[id] = NUMBER_OF_STATES;
}
if (1.0*rand()/RAND_MAX <= 0.5) {
ta_state[id+1] = NUMBER_OF_STATES+1;
} else {
ta_state[id+1] = NUMBER_OF_STATES;
}
}
}
}
/****************************/
/***** Public Functions *****/
/****************************/
void TsetlinMachine::update(hiprandState_t *devStates, int *Xi, int target, float s)
{
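    // One training step: compute each clause's output for Xi, sum the class votes,
    // clamp the sum to [-THRESHOLD, THRESHOLD], then draw per-clause feedback and
    // apply Type I / Type II feedback to the Tsetlin automata states.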
hipLaunchKernelGGL(( initialize_clause_output), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, clause_output);
hipDeviceSynchronize();
hipLaunchKernelGGL(( calculate_clause_output), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, ta_state, clause_output, Xi);
hipDeviceSynchronize();
*class_sum = 0;
hipLaunchKernelGGL(( sum_up_class_votes), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, clause_output, class_sum);
hipDeviceSynchronize();
if (*class_sum > THRESHOLD) {
*class_sum = THRESHOLD;
} else if (*class_sum < -THRESHOLD) {
*class_sum = -THRESHOLD;
}
hipLaunchKernelGGL(( generate_clause_feedback), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, devStates, clause_feedback, class_sum, target);
hipDeviceSynchronize();
hipLaunchKernelGGL(( type_i_feedback), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, devStates, ta_state, clause_feedback, clause_output, Xi, s);
hipDeviceSynchronize();
hipLaunchKernelGGL(( type_ii_feedback), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, ta_state, clause_feedback, clause_output, Xi);
hipDeviceSynchronize();
}
int TsetlinMachine::get_state(int id)
{
return ta_state[id];
}
int TsetlinMachine::score(int *Xi)
{
hipLaunchKernelGGL(( initialize_clause_output_predict), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, clause_output, all_exclude);
hipDeviceSynchronize();
hipLaunchKernelGGL(( calculate_clause_output_predict), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, ta_state, clause_output, all_exclude, Xi);
hipDeviceSynchronize();
hipLaunchKernelGGL(( update_with_all_exclude), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, clause_output, all_exclude);
hipDeviceSynchronize();
*class_sum = 0;
hipLaunchKernelGGL(( sum_up_class_votes), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, clause_output, class_sum);
hipDeviceSynchronize();
if (*class_sum > THRESHOLD) {
*class_sum = THRESHOLD;
} else if (*class_sum < -THRESHOLD) {
*class_sum = -THRESHOLD;
}
return *class_sum;
}
| 8634012a4d2ccc5d9934613a1cad2f69231f695a.cu | /*
Copyright (c) 2018 Ole-Christoffer Granmo
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This code implements the Tsetlin Machine from paper arXiv:1804.01508
https://arxiv.org/abs/1804.01508
*/
#include "TsetlinMachineKernels.cuh"
#include "TsetlinMachine.cuh"
#include "TsetlinMachineConfig.cuh"
#include "GPUConfig.cuh"
#include <stdio.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
/**********************************/
/***** Constructor/Destructor *****/
/**********************************/
TsetlinMachine::TsetlinMachine()
{
cudaMallocManaged(&class_sum, sizeof(int));
cudaMallocManaged(&ta_state, CLAUSES*FEATURES*2*sizeof(int));
cudaMallocManaged(&clause_output, CLAUSES*sizeof(int));
cudaMallocManaged(&clause_feedback, CLAUSES*sizeof(int));
cudaMallocManaged(&all_exclude, CLAUSES*sizeof(int));
initialize();
}
TsetlinMachine::~TsetlinMachine()
{
cudaFree(class_sum);
cudaFree(ta_state);
cudaFree(clause_output);
cudaFree(clause_feedback);
cudaFree(all_exclude);
}
void TsetlinMachine::initialize()
{
// Initialize Tsetlin Automata states
for (int j = 0; j < CLAUSES; j++) {
for (int k = 0; k < FEATURES; k++) {
int id = j*FEATURES*2 + k*2;
if (1.0*rand()/RAND_MAX <= 0.5) {
ta_state[id] = NUMBER_OF_STATES+1;
} else {
ta_state[id] = NUMBER_OF_STATES;
}
if (1.0*rand()/RAND_MAX <= 0.5) {
ta_state[id+1] = NUMBER_OF_STATES+1;
} else {
ta_state[id+1] = NUMBER_OF_STATES;
}
}
}
}
/****************************/
/***** Public Functions *****/
/****************************/
void TsetlinMachine::update(curandState *devStates, int *Xi, int target, float s)
{
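    // One training step: compute each clause's output for Xi, sum the class votes,
    // clamp the sum to [-THRESHOLD, THRESHOLD], then draw per-clause feedback and
    // apply Type I / Type II feedback to the Tsetlin automata states.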
initialize_clause_output<<<GRID_SIZE,BLOCK_SIZE>>>(clause_output);
cudaDeviceSynchronize();
calculate_clause_output<<<GRID_SIZE,BLOCK_SIZE>>>(ta_state, clause_output, Xi);
cudaDeviceSynchronize();
*class_sum = 0;
sum_up_class_votes<<<GRID_SIZE,BLOCK_SIZE>>>(clause_output, class_sum);
cudaDeviceSynchronize();
if (*class_sum > THRESHOLD) {
*class_sum = THRESHOLD;
} else if (*class_sum < -THRESHOLD) {
*class_sum = -THRESHOLD;
}
generate_clause_feedback<<<GRID_SIZE,BLOCK_SIZE>>>(devStates, clause_feedback, class_sum, target);
cudaDeviceSynchronize();
type_i_feedback<<<GRID_SIZE,BLOCK_SIZE>>>(devStates, ta_state, clause_feedback, clause_output, Xi, s);
cudaDeviceSynchronize();
type_ii_feedback<<<GRID_SIZE,BLOCK_SIZE>>>(ta_state, clause_feedback, clause_output, Xi);
cudaDeviceSynchronize();
}
int TsetlinMachine::get_state(int id)
{
return ta_state[id];
}
int TsetlinMachine::score(int *Xi)
{
initialize_clause_output_predict<<<GRID_SIZE,BLOCK_SIZE>>>(clause_output, all_exclude);
cudaDeviceSynchronize();
calculate_clause_output_predict<<<GRID_SIZE,BLOCK_SIZE>>>(ta_state, clause_output, all_exclude, Xi);
cudaDeviceSynchronize();
update_with_all_exclude<<<GRID_SIZE,BLOCK_SIZE>>>(clause_output, all_exclude);
cudaDeviceSynchronize();
*class_sum = 0;
sum_up_class_votes<<<GRID_SIZE,BLOCK_SIZE>>>(clause_output, class_sum);
cudaDeviceSynchronize();
if (*class_sum > THRESHOLD) {
*class_sum = THRESHOLD;
} else if (*class_sum < -THRESHOLD) {
*class_sum = -THRESHOLD;
}
return *class_sum;
}
|
4835f129b8f5fb7585a215f6845bc445a8a2c5c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lightfield.hpp"
#include "cutil_math.hpp"
#include "cuda_helpers.hh"
#include <stdio.h>
#include <cmath>
#define PI 3.14159265358979f
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define TREE
namespace lightfield {
/* data structures */
struct LFData *camer_info = 0;
static struct LFCamera *gpuCam = 0;
static uint8_t *gpucolor = 0;
static struct LFData *gpuData = 0;
static struct LFData *cpuData = 0;
static float radius = 0;
// Proivdes data for each of the cameras in the lightfield.
// The up vector is not stored in the data, so you MUST at least
// use the code that computes the up vector in your solution.
static __host__ __device__ void get_lightfield_camera( LFCamera* camerap, const LFData* lfdata, size_t camera_index )
{
// read camera info from lfdata
float3 neg_dir = lfdata->camera_positions[camera_index];
camerap->position = neg_dir * lfdata->camera_radius;
camerap->direction = -neg_dir;
camerap->fov = lfdata->camera_fov;
camerap->aspect_ratio = lfdata->camera_aspect;
// compute up vector the same way as used in the original renders (not in data)
float3 up_tangent;
if (fabs(neg_dir.x) < 0.001f && fabs(neg_dir.z) < 0.001f)
up_tangent = make_float3(-1.0f, 0.0f, 0.0f);
else if (fabs(neg_dir.y) < 0.001f)
up_tangent = make_float3(0.0f, 1.0f, 0.0f);
else
up_tangent = make_float3(-neg_dir.x, 1.0f/neg_dir.y-neg_dir.y, -neg_dir.z);
camerap->up = normalize(up_tangent); // NORMALIZE!!!!!!
}
static __host__ __device__ LFCamera get_lightfield_camera(const LFData* lfdata, uint16_t camera_index )
{
LFCamera camerap;
// read camera info from lfdata
float3 neg_dir = lfdata->camera_positions[camera_index];
camerap.position = neg_dir * lfdata->camera_radius;
camerap.direction = -neg_dir;
camerap.fov = lfdata->camera_fov;
camerap.aspect_ratio = lfdata->camera_aspect;
// compute up vector the same way as used in the original renders (not in data)
float3 up_tangent;
if (fabs(neg_dir.x) < 0.001f && fabs(neg_dir.z) < 0.001f)
up_tangent = make_float3(-1.0f, 0.0f, 0.0f);
else if (fabs(neg_dir.y) < 0.001f)
up_tangent = make_float3(0.0f, 1.0f, 0.0f);
else
up_tangent = make_float3(-neg_dir.x, 1.0f/neg_dir.y-neg_dir.y, -neg_dir.z);
camerap.up = normalize(up_tangent); // NORMALIZE!!!!
return camerap;
}
__device__ bool find_sphere_t(float3 start, float3 direction, float radius, float2 *t){
/*find the intersection value of t based on the sphere*/
/* x^2 + y^2 + z^2 - r^2 = 0 */
float a, b, c, discrim; /* the a,b,c and discriminate values for our */
float2 t0 = make_float2(0.0f);
/* simplify direction and start point vectors */
float3 d = direction; float3 p = start;
/*Calculate the a, b, c and discriminant values for your quadratic equation*/
a = dot(d,d); /* d dot d */
b = 2 * ( dot(d,p) ); /* 2 * d dot o */
c = dot(p,p) - pow(radius, 2);
discrim = ( pow(b, 2) - (4 * a * c) );
/*if a is zero, then there is no quadratic possible*/
if (a == 0) {
return false;
}//end if
/*if discriminant is < 0, there are two complex roots(no intersection) or one tangential intersecton*/
if ( discrim <= 0) {
return false;
}//end if
/*otherwise, discriminate is > 0 & there are 2 intersection points*/
else {
/*find the two roots*/
t0.x = (-b + sqrt( discrim ) )/ (2 * a);
t0.y = (-b - sqrt(discrim ) )/ (2 * a);
}//end else
*t = t0;
return true;
}
__global__ void cast_rays(struct LFCamera *cam, uint8_t *color, struct LFData * data, float3 uVec, float3 vVec, size_t width, size_t height, float radius){
// calculate the unique thread index
int index = blockIdx.x * blockDim.x + threadIdx.x;
int x = index%width; // unique x pixel
    int y = index/width; // unique y pixel (row index: the image holds one row per `width` pixels)
float angle = tan(cam->fov * 0.5);
float u = (2.0 * x - width)/width ;
float v = (2.0 * y - height)/height;
LFCamera cameras[3]; /* three cameras near intersection */
float3 intersection = make_float3(0.0f);
// eye ray
float3 direction = normalize(cam->direction + angle * (uVec * u * cam->aspect_ratio + v * vVec));
float2 t = make_float2(0.0f);
if(find_sphere_t(cam->position, direction, radius, &t)){ // if we intersect with the sphere
// plug t into implicit sphere equation
intersection = cam->position + direction * min(t.x, t.y); // find intersection point
// convert the intersection point to spherical coordinates
float theta = atan2f(intersection.z, intersection.x);
theta += (theta < 0) ? 2 * PI : 0; // add PI if less than 0
float phi = acos(intersection.y / radius);
if(intersection.y > 0){ // upper hemisphere
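            // Map (theta, phi) onto the precomputed lookup grid: each (s, t) cell stores the
            // indices of the three nearest lightfield cameras and their barycentric weights,
            // which are used below to blend samples from the three camera textures.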
// get the LFindex of the cameras close to my intersection point
int s = theta/(2*PI) * data->lookup_width - 0.5;
int t = phi/PI * 2 * data->lookup_height - 0.5;
int cam_index = s + t * data->lookup_width;
LFData::LFindex cam_indx; // three closest cameras
LFData::LFabg bari_Vals; // three baricentric coords
memcpy(cam_indx, data->indices[cam_index], sizeof(LFData::LFindex) );
memcpy(bari_Vals, data->abg[cam_index], sizeof(LFData::LFabg) );
// cam_indx[0] = data->indices[cam_index][0]; // cam1 index
// cam_indx[1] = data->indices[cam_index][1]; // cam2 index
// cam_indx[2] = data->indices[cam_index][2]; // cam3 index
cameras[0] = get_lightfield_camera(data, cam_indx[0]); // get cam 1
cameras[1] = get_lightfield_camera(data, cam_indx[1]); // get cam 2
cameras[2] = get_lightfield_camera(data, cam_indx[2]); // get cam 3
/* field of view of the camera */
float height_cam = radius * 0.5 * tan(cam->fov * 0.5);
float width_cam = height_cam * cam->aspect_ratio;
/* conversion factors */
float conv_u = (data->camera_tex_width -1) / (2 * width_cam);
float conv_v = (data->camera_tex_height -1) / (2 * height_cam);
// new color for pixel
float4 color_new = make_float4(0.0f);
for(int i = 0 ;i < 3; i++){
/* find time of intersection */
float d = -dot( cam->position, cameras[i].direction)
/ dot(direction, cameras[i].direction) ;
/* be sure to add the alpha evn if no intersection*/
if(d < 0){
color_new += bari_Vals[i]/255.0 * make_float4(0.0f, 0.0f, 0.0f, 255.0f);
continue;
}
// uVec and vVec of new cameras
float3 uVec1 = cross(cameras[i].direction, cameras[i].up);
float3 vVec1 = cameras[i].up;
// find intersection point
float3 plane_int = cam->position + d * direction;
// find the u, v position
float u1 = dot(uVec1, plane_int);
float v1 = dot(vVec1, plane_int);
u1 = (u1 + width_cam) * conv_u;
v1 = (v1 + height_cam) * conv_v;
// offset into the color
int offset = cam_indx[i] * data->camera_tex_height * data->camera_tex_width;
u1 = clamp(u1, 0.0, data->camera_tex_width-1);
v1 = clamp(v1, 0.0, data->camera_tex_height-1);
int s1 = floor(u1);
int t1 = floor(v1);
float4 avg = make_float4(0.0f);
#ifdef BILINEAR
/* Bilinear interpolation:
* f(x,y) = ( Q11/ ( (x2-x1) * (y2-y1) ) ) * ( (x2-x) * (y2-y) ) +
* ( Q21/ ( (x2-x1) * (y2-y1) ) ) * ( (x-x1) * (y2-y) ) +
* ( Q12/ ( (x2-x1) * (y2-y1) ) ) * ( (x2-x) * (y-y1) ) +
* ( Q22/ ( (x2-x1) * (y2-y1) ) ) * ( (x1-x) * (y-y1) );
*/
float x = u1; float y = v1;
float x1 = floor(x); float x2 = x1 + 1;;
float y1 = floor(y); float y2 = y1 + 1;
float denominator = ( (y2-y1) * (x2-x1) );
float4 Q11 = make_float4(data->color_textures[offset + (int)x1 + (int)y1 * data->camera_tex_width]);
float4 Q21 = make_float4(data->color_textures[offset + (int)x2 + (int)y1 * data->camera_tex_width]);
float4 Q12 = make_float4(data->color_textures[offset + (int)x1 + (int)y2 * data->camera_tex_width]);
float4 Q22 = make_float4(data->color_textures[offset + (int)x2 + (int)y2 * data->camera_tex_width]);
avg = ( Q11 / denominator * ( (x2 - x) * (y2 - y) ) ) +
( Q21 / denominator * ( (x - x1) * (y2 - y) ) ) +
( Q12 / denominator * ( (x2 - x) * (y - y1) ) ) +
( Q22 / denominator * ( (x - x1) * (y - y1) ) );
color_new += avg ;// * bari_Vals[i]/255.0;
#else
/* four closest pixels */
float4 tex1 = make_float4(data->color_textures[offset + s1 + t1 * data->camera_tex_width]);
float4 tex2 = make_float4(data->color_textures[offset + (s1+1) + t1 * data->camera_tex_width]);
float4 tex3 = make_float4(data->color_textures[offset + s1 + (t1+1) * data->camera_tex_width]);
float4 tex4 = make_float4(data->color_textures[offset + (s1+1) + (t1+1) * data->camera_tex_width]);
/* take the average */
avg = (tex1 + tex2 + tex3 + tex4) * 0.25;
color_new += avg * bari_Vals[i]/255.0;
#endif
}
#ifdef TREE
color[index * 4 + 0] = color_new.x;//color_new.x;
color[index * 4 + 1] = color_new.y;
color[index * 4 + 2] = color_new.z;
color[index * 4 + 3] = color_new.w;//color_new.w;
// display indices as colors
#elif defined INDICES
color[index * 4 + 0] = length(intersection - cameras[0].position) * 30;
color[index * 4 + 1] = 0;//length(intersection - cameras[1].position) * 64;
color[index * 4 + 2] = 0;//length(intersection - cameras[2].position) * 64;
color[index * 4 + 3] = 255;
// display baricentric coords
#elif defined BARI
    // barycentric test output
color[index * 4 + 0] = bari_Vals[0];
color[index * 4 + 1] = bari_Vals[1];
color[index * 4 + 2] = bari_Vals[2];
color[index * 4 + 3] = 255;
#endif
}else{ // lower hemisphere = BLACK
color[index * 4 + 0] = 0;
color[index * 4 + 1] = 0;
color[index * 4 + 2] = 0;
color[index * 4 + 3] = 0;
} // end inside else
}else{ // outside sphere = BLACK
color[index * 4 + 0] = 0;
color[index * 4 + 1] = 0;
color[index * 4 + 2] = 0;
color[index * 4 + 3] = 0;
} // end else
} // end cast ray
class MyLightfield : public ILightfield
{
public:
MyLightfield();
~MyLightfield();
virtual bool initialize( const LFData* data, size_t window_width, size_t window_height );
virtual void render( uint8_t* color, size_t width, size_t height, const LFCamera* camera );
};
MyLightfield::MyLightfield() {} // default constructor
MyLightfield::~MyLightfield() // default deconstructor
{
/* clean up */
hipFree(gpuData);
hipFree(gpuCam);
hipFree(gpucolor);
free(cpuData);
}
bool MyLightfield::initialize( const LFData* data, size_t window_width, size_t window_height )
{
/* create a pointer for the camera, data, and color */
hipMalloc((void**)&gpuCam, sizeof(struct LFCamera));
    hipMalloc((void**)&gpucolor, 4 * sizeof(uint8_t) * window_width * window_height); // RGBA output buffer: one 4-byte pixel per (width x height) thread
hipMalloc((void**)&gpuData, sizeof(struct LFData));
/* temp cpu struct */
cpuData = (struct LFData*)malloc(sizeof(struct LFData));
memcpy(cpuData, data, sizeof(struct LFData));
/* create GPU pointers to data */
hipMalloc((void**)&cpuData->color_textures, sizeof(uint32_t) * cpuData->camera_tex_height * cpuData->camera_tex_width * cpuData->num_cameras);
hipMemcpy(cpuData->color_textures, data->color_textures, sizeof(uint32_t) * cpuData->camera_tex_height * cpuData->camera_tex_width * cpuData->num_cameras, hipMemcpyHostToDevice);
hipMalloc((void**)&cpuData->indices, sizeof(LFData::LFindex) * cpuData->lookup_height * cpuData->lookup_width);
hipMemcpy(cpuData->indices, data->indices, sizeof(LFData::LFindex) * cpuData->lookup_height * cpuData->lookup_width, hipMemcpyHostToDevice);
hipMalloc((void**)&cpuData->abg, sizeof(LFData::LFabg) * cpuData->lookup_height * cpuData->lookup_width);
hipMemcpy(cpuData->abg, data->abg, sizeof(LFData::LFabg) * cpuData->lookup_height * cpuData->lookup_width, hipMemcpyHostToDevice);
hipMalloc((void**)&cpuData->camera_positions, sizeof(float3) * cpuData->num_cameras);
hipMemcpy(cpuData->camera_positions, data->camera_positions, sizeof(float3) * cpuData->num_cameras, hipMemcpyHostToDevice);
/* copy temp struct to gpu */
hipMemcpy(gpuData, cpuData, sizeof(struct LFData), hipMemcpyHostToDevice);
/* copy over the radius for easy use */
radius = cpuData->camera_radius;
return true;
}
void MyLightfield::render( uint8_t* color, size_t width, size_t height, const LFCamera* camera )
{
/* copy over the viewers camera into gpu memory */
hipMemcpy(gpuCam, camera, sizeof(struct LFCamera), hipMemcpyHostToDevice);
/* constants for kernel */
const int threadsPerBlock = 512 ;
int totalThreads = width * height;
int nBlocks = totalThreads / threadsPerBlock;
nBlocks += ((totalThreads % threadsPerBlock) > 0) ? 1 : 0;
float3 uVec = cross(camera->direction, camera->up);
float3 vVec = camera->up;
/* launch kernel */
hipLaunchKernelGGL(( cast_rays), dim3(nBlocks), dim3(threadsPerBlock), 0, 0, gpuCam, gpucolor, gpuData, uVec, vVec, width, height, radius);
/* copy back data */
hipMemcpy(color, gpucolor, 4 * sizeof(uint8_t) * totalThreads, hipMemcpyDeviceToHost);
}
ILightfield* create_lightfield()
{
return new MyLightfield();
}
}
| 4835f129b8f5fb7585a215f6845bc445a8a2c5c2.cu |
#include "lightfield.hpp"
#include "cutil_math.hpp"
#include "cuda_helpers.hh"
#include <stdio.h>
#include <cmath>
#define PI 3.14159265358979f
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define TREE
namespace lightfield {
/* data structures */
struct LFData *camer_info = 0;
static struct LFCamera *gpuCam = 0;
static uint8_t *gpucolor = 0;
static struct LFData *gpuData = 0;
static struct LFData *cpuData = 0;
static float radius = 0;
// Proivdes data for each of the cameras in the lightfield.
// The up vector is not stored in the data, so you MUST at least
// use the code that computes the up vector in your solution.
static __host__ __device__ void get_lightfield_camera( LFCamera* camerap, const LFData* lfdata, size_t camera_index )
{
// read camera info from lfdata
float3 neg_dir = lfdata->camera_positions[camera_index];
camerap->position = neg_dir * lfdata->camera_radius;
camerap->direction = -neg_dir;
camerap->fov = lfdata->camera_fov;
camerap->aspect_ratio = lfdata->camera_aspect;
// compute up vector the same way as used in the original renders (not in data)
float3 up_tangent;
if (fabs(neg_dir.x) < 0.001f && fabs(neg_dir.z) < 0.001f)
up_tangent = make_float3(-1.0f, 0.0f, 0.0f);
else if (fabs(neg_dir.y) < 0.001f)
up_tangent = make_float3(0.0f, 1.0f, 0.0f);
else
up_tangent = make_float3(-neg_dir.x, 1.0f/neg_dir.y-neg_dir.y, -neg_dir.z);
camerap->up = normalize(up_tangent); // NORMALIZE!!!!!!
}
static __host__ __device__ LFCamera get_lightfield_camera(const LFData* lfdata, uint16_t camera_index )
{
LFCamera camerap;
// read camera info from lfdata
float3 neg_dir = lfdata->camera_positions[camera_index];
camerap.position = neg_dir * lfdata->camera_radius;
camerap.direction = -neg_dir;
camerap.fov = lfdata->camera_fov;
camerap.aspect_ratio = lfdata->camera_aspect;
// compute up vector the same way as used in the original renders (not in data)
float3 up_tangent;
if (fabs(neg_dir.x) < 0.001f && fabs(neg_dir.z) < 0.001f)
up_tangent = make_float3(-1.0f, 0.0f, 0.0f);
else if (fabs(neg_dir.y) < 0.001f)
up_tangent = make_float3(0.0f, 1.0f, 0.0f);
else
up_tangent = make_float3(-neg_dir.x, 1.0f/neg_dir.y-neg_dir.y, -neg_dir.z);
camerap.up = normalize(up_tangent); // NORMALIZE!!!!
return camerap;
}
__device__ bool find_sphere_t(float3 start, float3 direction, float radius, float2 *t){
/*find the intersection value of t based on the sphere*/
/* x^2 + y^2 + z^2 - r^2 = 0 */
float a, b, c, discrim; /* the a,b,c and discriminate values for our */
float2 t0 = make_float2(0.0f);
/* simplify direction and start point vectors */
float3 d = direction; float3 p = start;
/*Calculate the a, b, c and discriminant values for your quadratic equation*/
a = dot(d,d); /* d dot d */
b = 2 * ( dot(d,p) ); /* 2 * d dot o */
c = dot(p,p) - pow(radius, 2);
discrim = ( pow(b, 2) - (4 * a * c) );
/*if a is zero, then there is no quadratic possible*/
if (a == 0) {
return false;
}//end if
/*if discriminant is < 0, there are two complex roots(no intersection) or one tangential intersecton*/
if ( discrim <= 0) {
return false;
}//end if
/*otherwise, discriminate is > 0 & there are 2 intersection points*/
else {
/*find the two roots*/
t0.x = (-b + sqrt( discrim ) )/ (2 * a);
t0.y = (-b - sqrt(discrim ) )/ (2 * a);
}//end else
*t = t0;
return true;
}
__global__ void cast_rays(struct LFCamera *cam, uint8_t *color, struct LFData * data, float3 uVec, float3 vVec, size_t width, size_t height, float radius){
// calculate the unique thread index
int index = blockIdx.x * blockDim.x + threadIdx.x;
int x = index%width; // unique x pixel
    int y = index/width; // unique y pixel (row index: the image holds one row per `width` pixels)
float angle = tan(cam->fov * 0.5);
float u = (2.0 * x - width)/width ;
float v = (2.0 * y - height)/height;
LFCamera cameras[3]; /* three cameras near intersection */
float3 intersection = make_float3(0.0f);
// eye ray
float3 direction = normalize(cam->direction + angle * (uVec * u * cam->aspect_ratio + v * vVec));
float2 t = make_float2(0.0f);
if(find_sphere_t(cam->position, direction, radius, &t)){ // if we intersect with the sphere
// plug t into implicit sphere equation
intersection = cam->position + direction * min(t.x, t.y); // find intersection point
// convert the intersection point to spherical coordinates
float theta = atan2f(intersection.z, intersection.x);
theta += (theta < 0) ? 2 * PI : 0; // add PI if less than 0
float phi = acos(intersection.y / radius);
if(intersection.y > 0){ // upper hemisphere
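            // Map (theta, phi) onto the precomputed lookup grid: each (s, t) cell stores the
            // indices of the three nearest lightfield cameras and their barycentric weights,
            // which are used below to blend samples from the three camera textures.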
// get the LFindex of the cameras close to my intersection point
int s = theta/(2*PI) * data->lookup_width - 0.5;
int t = phi/PI * 2 * data->lookup_height - 0.5;
int cam_index = s + t * data->lookup_width;
LFData::LFindex cam_indx; // three closest cameras
LFData::LFabg bari_Vals; // three baricentric coords
memcpy(cam_indx, data->indices[cam_index], sizeof(LFData::LFindex) );
memcpy(bari_Vals, data->abg[cam_index], sizeof(LFData::LFabg) );
// cam_indx[0] = data->indices[cam_index][0]; // cam1 index
// cam_indx[1] = data->indices[cam_index][1]; // cam2 index
// cam_indx[2] = data->indices[cam_index][2]; // cam3 index
cameras[0] = get_lightfield_camera(data, cam_indx[0]); // get cam 1
cameras[1] = get_lightfield_camera(data, cam_indx[1]); // get cam 2
cameras[2] = get_lightfield_camera(data, cam_indx[2]); // get cam 3
/* field of view of the camera */
float height_cam = radius * 0.5 * tan(cam->fov * 0.5);
float width_cam = height_cam * cam->aspect_ratio;
/* conversion factors */
float conv_u = (data->camera_tex_width -1) / (2 * width_cam);
float conv_v = (data->camera_tex_height -1) / (2 * height_cam);
// new color for pixel
float4 color_new = make_float4(0.0f);
for(int i = 0 ;i < 3; i++){
/* find time of intersection */
float d = -dot( cam->position, cameras[i].direction)
/ dot(direction, cameras[i].direction) ;
/* be sure to add the alpha evn if no intersection*/
if(d < 0){
color_new += bari_Vals[i]/255.0 * make_float4(0.0f, 0.0f, 0.0f, 255.0f);
continue;
}
// uVec and vVec of new cameras
float3 uVec1 = cross(cameras[i].direction, cameras[i].up);
float3 vVec1 = cameras[i].up;
// find intersection point
float3 plane_int = cam->position + d * direction;
// find the u, v position
float u1 = dot(uVec1, plane_int);
float v1 = dot(vVec1, plane_int);
u1 = (u1 + width_cam) * conv_u;
v1 = (v1 + height_cam) * conv_v;
// offset into the color
int offset = cam_indx[i] * data->camera_tex_height * data->camera_tex_width;
u1 = clamp(u1, 0.0, data->camera_tex_width-1);
v1 = clamp(v1, 0.0, data->camera_tex_height-1);
int s1 = floor(u1);
int t1 = floor(v1);
float4 avg = make_float4(0.0f);
#ifdef BILINEAR
/* Bilinear interpolation:
* f(x,y) = ( Q11/ ( (x2-x1) * (y2-y1) ) ) * ( (x2-x) * (y2-y) ) +
* ( Q21/ ( (x2-x1) * (y2-y1) ) ) * ( (x-x1) * (y2-y) ) +
* ( Q12/ ( (x2-x1) * (y2-y1) ) ) * ( (x2-x) * (y-y1) ) +
* ( Q22/ ( (x2-x1) * (y2-y1) ) ) * ( (x1-x) * (y-y1) );
*/
float x = u1; float y = v1;
float x1 = floor(x); float x2 = x1 + 1;;
float y1 = floor(y); float y2 = y1 + 1;
float denominator = ( (y2-y1) * (x2-x1) );
float4 Q11 = make_float4(data->color_textures[offset + (int)x1 + (int)y1 * data->camera_tex_width]);
float4 Q21 = make_float4(data->color_textures[offset + (int)x2 + (int)y1 * data->camera_tex_width]);
float4 Q12 = make_float4(data->color_textures[offset + (int)x1 + (int)y2 * data->camera_tex_width]);
float4 Q22 = make_float4(data->color_textures[offset + (int)x2 + (int)y2 * data->camera_tex_width]);
avg = ( Q11 / denominator * ( (x2 - x) * (y2 - y) ) ) +
( Q21 / denominator * ( (x - x1) * (y2 - y) ) ) +
( Q12 / denominator * ( (x2 - x) * (y - y1) ) ) +
( Q22 / denominator * ( (x - x1) * (y - y1) ) );
color_new += avg ;// * bari_Vals[i]/255.0;
#else
/* four closest pixels */
float4 tex1 = make_float4(data->color_textures[offset + s1 + t1 * data->camera_tex_width]);
float4 tex2 = make_float4(data->color_textures[offset + (s1+1) + t1 * data->camera_tex_width]);
float4 tex3 = make_float4(data->color_textures[offset + s1 + (t1+1) * data->camera_tex_width]);
float4 tex4 = make_float4(data->color_textures[offset + (s1+1) + (t1+1) * data->camera_tex_width]);
/* take the average */
avg = (tex1 + tex2 + tex3 + tex4) * 0.25;
color_new += avg * bari_Vals[i]/255.0;
#endif
}
#ifdef TREE
color[index * 4 + 0] = color_new.x;//color_new.x;
color[index * 4 + 1] = color_new.y;
color[index * 4 + 2] = color_new.z;
color[index * 4 + 3] = color_new.w;//color_new.w;
// display indices as colors
#elif defined INDICES
color[index * 4 + 0] = length(intersection - cameras[0].position) * 30;
color[index * 4 + 1] = 0;//length(intersection - cameras[1].position) * 64;
color[index * 4 + 2] = 0;//length(intersection - cameras[2].position) * 64;
color[index * 4 + 3] = 255;
// display baricentric coords
#elif defined BARI
    // barycentric test output
color[index * 4 + 0] = bari_Vals[0];
color[index * 4 + 1] = bari_Vals[1];
color[index * 4 + 2] = bari_Vals[2];
color[index * 4 + 3] = 255;
#endif
}else{ // lower hemisphere = BLACK
color[index * 4 + 0] = 0;
color[index * 4 + 1] = 0;
color[index * 4 + 2] = 0;
color[index * 4 + 3] = 0;
} // end inside else
}else{ // outside sphere = BLACK
color[index * 4 + 0] = 0;
color[index * 4 + 1] = 0;
color[index * 4 + 2] = 0;
color[index * 4 + 3] = 0;
} // end else
} // end cast ray
class MyLightfield : public ILightfield
{
public:
MyLightfield();
~MyLightfield();
virtual bool initialize( const LFData* data, size_t window_width, size_t window_height );
virtual void render( uint8_t* color, size_t width, size_t height, const LFCamera* camera );
};
MyLightfield::MyLightfield() {} // default constructor
MyLightfield::~MyLightfield() // default deconstructor
{
/* clean up */
cudaFree(gpuData);
cudaFree(gpuCam);
cudaFree(gpucolor);
free(cpuData);
}
bool MyLightfield::initialize( const LFData* data, size_t window_width, size_t window_height )
{
/* create a pointer for the camera, data, and color */
cudaMalloc((void**)&gpuCam, sizeof(struct LFCamera));
    cudaMalloc((void**)&gpucolor, 4 * sizeof(uint8_t) * window_width * window_height); // RGBA output buffer: one 4-byte pixel per (width x height) thread
cudaMalloc((void**)&gpuData, sizeof(struct LFData));
/* temp cpu struct */
cpuData = (struct LFData*)malloc(sizeof(struct LFData));
memcpy(cpuData, data, sizeof(struct LFData));
/* create GPU pointers to data */
cudaMalloc((void**)&cpuData->color_textures, sizeof(uint32_t) * cpuData->camera_tex_height * cpuData->camera_tex_width * cpuData->num_cameras);
cudaMemcpy(cpuData->color_textures, data->color_textures, sizeof(uint32_t) * cpuData->camera_tex_height * cpuData->camera_tex_width * cpuData->num_cameras, cudaMemcpyHostToDevice);
cudaMalloc((void**)&cpuData->indices, sizeof(LFData::LFindex) * cpuData->lookup_height * cpuData->lookup_width);
cudaMemcpy(cpuData->indices, data->indices, sizeof(LFData::LFindex) * cpuData->lookup_height * cpuData->lookup_width, cudaMemcpyHostToDevice);
cudaMalloc((void**)&cpuData->abg, sizeof(LFData::LFabg) * cpuData->lookup_height * cpuData->lookup_width);
cudaMemcpy(cpuData->abg, data->abg, sizeof(LFData::LFabg) * cpuData->lookup_height * cpuData->lookup_width, cudaMemcpyHostToDevice);
cudaMalloc((void**)&cpuData->camera_positions, sizeof(float3) * cpuData->num_cameras);
cudaMemcpy(cpuData->camera_positions, data->camera_positions, sizeof(float3) * cpuData->num_cameras, cudaMemcpyHostToDevice);
/* copy temp struct to gpu */
cudaMemcpy(gpuData, cpuData, sizeof(struct LFData), cudaMemcpyHostToDevice);
/* copy over the radius for easy use */
radius = cpuData->camera_radius;
return true;
}
void MyLightfield::render( uint8_t* color, size_t width, size_t height, const LFCamera* camera )
{
/* copy over the viewers camera into gpu memory */
cudaMemcpy(gpuCam, camera, sizeof(struct LFCamera), cudaMemcpyHostToDevice);
/* constants for kernel */
const int threadsPerBlock = 512 ;
int totalThreads = width * height;
int nBlocks = totalThreads / threadsPerBlock;
nBlocks += ((totalThreads % threadsPerBlock) > 0) ? 1 : 0;
float3 uVec = cross(camera->direction, camera->up);
float3 vVec = camera->up;
/* launch kernel */
cast_rays<<<nBlocks, threadsPerBlock>>>(gpuCam, gpucolor, gpuData, uVec, vVec, width, height, radius);
/* copy back data */
cudaMemcpy(color, gpucolor, 4 * sizeof(uint8_t) * totalThreads, cudaMemcpyDeviceToHost);
}
ILightfield* create_lightfield()
{
return new MyLightfield();
}
}
|
ced9b8686b9b170cdf35ec82bdd6f1463467857a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include "timer.h"
#include <stdio.h>
__global__
void sum_kernel(ull * const d_arr, ull * const d_out, int N)
{
extern __shared__ ull s_arr[];
int x = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
    // Guard the load instead of returning early: an early return would skip the
    // __syncthreads() below (undefined behaviour) and would leave shared-memory
    // slots uninitialised in the last, partially filled block.
    s_arr[tid] = (x < N) ? d_arr[x] : 0;
__syncthreads();
for (unsigned int s = blockDim.x/2; s > 0; s >>= 1) {
if (tid < s) {
s_arr[tid] += s_arr[tid + s];
}
__syncthreads();
}
if (tid == 0) {
d_out[blockIdx.x] = s_arr[0];
}
}
ull parallel_sum(const ull * const h_arr, int N)
{
const size_t blockSize = 1024;
size_t gridSize = (N+blockSize-1)/blockSize;
ull *d_arr, *d_out;
size_t arraySize = N * sizeof(ull);
checkCudaErrors(hipMalloc((void**)&d_arr, arraySize));
checkCudaErrors(hipMalloc((void**)&d_out, gridSize * sizeof(ull)));
// host -> device
checkCudaErrors(hipMemcpy(d_arr, h_arr, arraySize, hipMemcpyHostToDevice));
GpuTimer timer;
timer.Start();
size_t numElts = N;
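    // Iterative tree reduction: each pass writes one partial sum per block, then the
    // input/output pointers are swapped so those partial sums feed the next pass;
    // the final (< blockSize) partial sums are added up on the CPU below.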
while (numElts >= blockSize) {
hipLaunchKernelGGL(( sum_kernel), dim3(gridSize), dim3(blockSize), blockSize * sizeof(ull), 0, d_arr, d_out, numElts);
// swap
ull *tmp = d_arr;
d_arr = d_out;
d_out = tmp;
numElts = gridSize;
gridSize = (numElts+blockSize-1)/blockSize;
}
timer.Stop();
hipDeviceSynchronize();
ull *h_out = (ull *) malloc(numElts * sizeof(ull));
checkCudaErrors(hipMemcpy(h_out, d_arr, numElts * sizeof(ull), hipMemcpyDeviceToHost));
ull sum = 0;
for (size_t i = 0; i < numElts; ++i)
sum += h_out[i];
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipFree(d_arr);
hipFree(d_out);
free(h_out);
printf("Your parallel code ran in: %f msecs.\n", timer.Elapsed());
return sum;
}
| ced9b8686b9b170cdf35ec82bdd6f1463467857a.cu | #include "utils.h"
#include "timer.h"
#include <stdio.h>
__global__
void sum_kernel(ull * const d_arr, ull * const d_out, int N)
{
extern __shared__ ull s_arr[];
int x = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
    // Guard the load instead of returning early: an early return would skip the
    // __syncthreads() below (undefined behaviour) and would leave shared-memory
    // slots uninitialised in the last, partially filled block.
    s_arr[tid] = (x < N) ? d_arr[x] : 0;
__syncthreads();
for (unsigned int s = blockDim.x/2; s > 0; s >>= 1) {
if (tid < s) {
s_arr[tid] += s_arr[tid + s];
}
__syncthreads();
}
if (tid == 0) {
d_out[blockIdx.x] = s_arr[0];
}
}
ull parallel_sum(const ull * const h_arr, int N)
{
const size_t blockSize = 1024;
size_t gridSize = (N+blockSize-1)/blockSize;
ull *d_arr, *d_out;
size_t arraySize = N * sizeof(ull);
checkCudaErrors(cudaMalloc((void**)&d_arr, arraySize));
checkCudaErrors(cudaMalloc((void**)&d_out, gridSize * sizeof(ull)));
// host -> device
checkCudaErrors(cudaMemcpy(d_arr, h_arr, arraySize, cudaMemcpyHostToDevice));
GpuTimer timer;
timer.Start();
size_t numElts = N;
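    // Iterative tree reduction: each pass writes one partial sum per block, then the
    // input/output pointers are swapped so those partial sums feed the next pass;
    // the final (< blockSize) partial sums are added up on the CPU below.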
while (numElts >= blockSize) {
sum_kernel<<<gridSize, blockSize, blockSize * sizeof(ull)>>>(d_arr, d_out, numElts);
// swap
ull *tmp = d_arr;
d_arr = d_out;
d_out = tmp;
numElts = gridSize;
gridSize = (numElts+blockSize-1)/blockSize;
}
timer.Stop();
cudaDeviceSynchronize();
ull *h_out = (ull *) malloc(numElts * sizeof(ull));
checkCudaErrors(cudaMemcpy(h_out, d_arr, numElts * sizeof(ull), cudaMemcpyDeviceToHost));
ull sum = 0;
for (size_t i = 0; i < numElts; ++i)
sum += h_out[i];
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
cudaFree(d_arr);
cudaFree(d_out);
free(h_out);
printf("Your parallel code ran in: %f msecs.\n", timer.Elapsed());
return sum;
}
|
ffb064a4d252082aecaebae107ac926e16d6c832.hip | // !!! This is a file automatically generated by hipify!!!
//--------------------------------------------------------------------------
// Project:
// Read GPU temepratures on a CUDA-enabled system.
// Bypass the need for 3rd party libraries.
// Insert into your code as desired.
//
// Prerequisites:
// Must have installed the CUDA toolkit.
// Must be running on a UNIX machine
//
// Independent testing info:
// Compile on commandline: nvcc read_temps.cu -o test
// run on commandline: ./test
//
// Author: Jordan Bonilla
// Date : April 2016
// License: All rights Reserved. See LICENSE.txt
//--------------------------------------------------------------------------
#include <cstdio> // printf
#include <stdlib.h> // popen, pclose, atoi, fread
#include <hip/hip_runtime.h> // hipGetDeviceCount
// Read temperatures of all connected NVIDIA GPUs
void read_temps()
{
// Get the number of GPUs on this machine
int num_devices;
hipGetDeviceCount(&num_devices);
if(num_devices == 0) {
printf("No NVIDIA GPUs detected\n");
return;
}
// Read GPU info into buffer "output" using the "nvidia-smi" command
const unsigned int MAX_BYTES = 10000;
char output[MAX_BYTES];
    FILE *fp = popen("nvidia-smi 2> /dev/null", "r"); // keep stdout on the pipe (it is parsed below); only silence stderr
    size_t bytes_read = fread(output, sizeof(char), MAX_BYTES - 1, fp);
    output[bytes_read] = '\0'; // fread does not null-terminate; the parser below scans until '\0'
pclose(fp);
// array to hold GPU temperatures
int * temperatures = new int[num_devices];
// parse output for temperatures using knowledge of "nvidia-smi" output format
int i = 0;
unsigned int num_temps_parsed = 0;
while(output[i] != '\0') {
if(output[i] == '%') {
unsigned int temp_begin = i + 1;
while(output[i] != 'C') {
++i;
}
unsigned int temp_end = i;
char this_temperature[32];
// Read in the characters cooresponding to this temperature
for(int j = 0; j < temp_end - temp_begin; ++j) {
this_temperature[j] = output[temp_begin + j];
}
            this_temperature[temp_end - temp_begin] = '\0'; // terminate directly after the last copied digit
// Convert the string representation to an int
temperatures[num_temps_parsed] = atoi(this_temperature);
num_temps_parsed++;
}
++i;
}
for (int i = 0; i < num_devices; i++)
{
printf("GPU %d temperature: %d C\n", i, temperatures[i]);
}
// Free memory and return
    delete[] temperatures; // allocated with new[], so release with delete[]
return;
}
int main(int argc, char **argv) {
read_temps();
return 0;
}
| ffb064a4d252082aecaebae107ac926e16d6c832.cu | //--------------------------------------------------------------------------
// Project:
// Read GPU temepratures on a CUDA-enabled system.
// Bypass the need for 3rd party libraries.
// Insert into your code as desired.
//
// Prerequisites:
// Must have installed the CUDA toolkit.
// Must be running on a UNIX machine
//
// Independent testing info:
// Compile on commandline: nvcc read_temps.cu -o test
// run on commandline: ./test
//
// Author: Jordan Bonilla
// Date : April 2016
// License: All rights Reserved. See LICENSE.txt
//--------------------------------------------------------------------------
#include <cstdio> // printf
#include <stdlib.h> // popen, pclose, atoi, fread
#include <cuda_runtime.h> // cudaGetDeviceCount
// Read temperatures of all connected NVIDIA GPUs
void read_temps()
{
// Get the number of GPUs on this machine
int num_devices;
cudaGetDeviceCount(&num_devices);
if(num_devices == 0) {
printf("No NVIDIA GPUs detected\n");
return;
}
// Read GPU info into buffer "output" using the "nvidia-smi" command
const unsigned int MAX_BYTES = 10000;
char output[MAX_BYTES];
    FILE *fp = popen("nvidia-smi 2> /dev/null", "r"); // keep stdout on the pipe (it is parsed below); only silence stderr
    size_t bytes_read = fread(output, sizeof(char), MAX_BYTES - 1, fp);
    output[bytes_read] = '\0'; // fread does not null-terminate; the parser below scans until '\0'
pclose(fp);
// array to hold GPU temperatures
int * temperatures = new int[num_devices];
// parse output for temperatures using knowledge of "nvidia-smi" output format
int i = 0;
unsigned int num_temps_parsed = 0;
while(output[i] != '\0') {
if(output[i] == '%') {
unsigned int temp_begin = i + 1;
while(output[i] != 'C') {
++i;
}
unsigned int temp_end = i;
char this_temperature[32];
// Read in the characters cooresponding to this temperature
for(int j = 0; j < temp_end - temp_begin; ++j) {
this_temperature[j] = output[temp_begin + j];
}
            this_temperature[temp_end - temp_begin] = '\0'; // terminate directly after the last copied digit
// Convert the string representation to an int
temperatures[num_temps_parsed] = atoi(this_temperature);
num_temps_parsed++;
}
++i;
}
for (int i = 0; i < num_devices; i++)
{
printf("GPU %d temperature: %d C\n", i, temperatures[i]);
}
// Free memory and return
delete(temperatures);
return;
}
int main(int argc, char **argv) {
read_temps();
return 0;
}
|
4a8e0273fc7b2d30aa1e0cb1c1155bf123621967.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <ctime>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
using namespace std;
__global__
void vecAddKernel(float * A, float *B, float *C, int n){
int i = blockDim.x*blockIdx.x + threadIdx.x;
if(i<n) C[i] = A[i] + B[i];
}
void vecAdd(float * A, float *B, float *C, int n){
int size = n * sizeof(float);
float *d_A, *d_B, *d_C;
hipMalloc((void**) &d_A, size);
hipMemcpy(d_A, A, size, hipMemcpyHostToDevice);
hipMalloc((void**) &d_B, size);
hipMemcpy(d_B,B,size, hipMemcpyHostToDevice);
hipMalloc((void**) &d_C, size);
dim3 dimGrid(ceil(n/256.0),1,1);
dim3 dimBlock(256,1,1);
hipLaunchKernelGGL(( vecAddKernel), dim3(dimGrid),dim3(dimBlock) , 0, 0, d_A, d_B, d_C, n);
hipMemcpy(C,d_C,size, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
int main(){
float * A = new float[1000];
float * B = new float[1000];
float * C = new float[1000];
for(int i = 0; i<1000; i++){
A[i] = i;
B[i] = i;
C[i] = 1;
}
vecAdd(A,B,C,1000);
for(int i = 0; i<20; i++)
cout << C[i] << endl;
    delete[] A; delete[] B; delete[] C; // release the host buffers allocated with new[]
    return 0;
} | 4a8e0273fc7b2d30aa1e0cb1c1155bf123621967.cu | #include <iostream>
#include <stdlib.h>
#include <ctime>
#include <cuda_runtime.h>
#include <cuda.h>
using namespace std;
__global__
void vecAddKernel(float * A, float *B, float *C, int n){
int i = blockDim.x*blockIdx.x + threadIdx.x;
if(i<n) C[i] = A[i] + B[i];
}
void vecAdd(float * A, float *B, float *C, int n){
int size = n * sizeof(float);
float *d_A, *d_B, *d_C;
cudaMalloc((void**) &d_A, size);
cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
cudaMalloc((void**) &d_B, size);
cudaMemcpy(d_B,B,size, cudaMemcpyHostToDevice);
cudaMalloc((void**) &d_C, size);
dim3 dimGrid(ceil(n/256.0),1,1);
dim3 dimBlock(256,1,1);
vecAddKernel<<<dimGrid,dimBlock >>> (d_A, d_B, d_C, n);
cudaMemcpy(C,d_C,size, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
int main(){
float * A = new float[1000];
float * B = new float[1000];
float * C = new float[1000];
for(int i = 0; i<1000; i++){
A[i] = i;
B[i] = i;
C[i] = 1;
}
vecAdd(A,B,C,1000);
for(int i = 0; i<20; i++)
cout << C[i] << endl;
    delete[] A; delete[] B; delete[] C; // release the host buffers allocated with new[]
    return 0;
} |
c00ade7375801d72a9e2199615d16db0f48848f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "projektcuda.h"
#include "project_comm.h"
/* Kernel to square elements of the array on the GPU */
/*
Matrix A is mA x nB , Vector B is nB
Vector C output vector in size of mA
C=A*B
description:
each row of A is processed by one block; if gridDim.x is smaller than the number of rows of A, blocks loop over the remaining rows in steps of gridDim.x
*/
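/*
   Illustrative host-side launch -- a sketch only, not part of the original project.
   It assumes d_A, d_B, d_C are device buffers of t_ve with sizes mA*nB, nB and mA,
   and that VECTOR_BLOCK_SIZE is the 512-thread block size from projektcuda.h:
       hipLaunchKernelGGL((matrixMul), dim3(mA), dim3(VECTOR_BLOCK_SIZE), 0, 0, d_C, d_A, d_B, mA, nB);
*/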
__global__ void matrixMul( t_ve* C, t_ve* A, t_ve* B, int mA, int nB)
{
//define a Result Vector for each block
    __shared__ t_ve Cs[VECTOR_BLOCK_SIZE]; // partial products; element type matches t_ve, and VECTOR_BLOCK_SIZE should equal blockDim.x (512)
//define gridIndex, if gridDim < mA, gridIndex > 0;
int gridIndex = 0;
// get a thread indentifier
//int idx = gridIndex*gridDim.x + blockIdx.x*blockDim.x+threadIdx.x;
int aBegin = 0;
int bBegin = 0;
int aStep = gridDim.x;
int bStep = VECTOR_BLOCK_SIZE; // blockDim.x
int aEnd = mA;
int bEnd = nB;
//initialise Cs
Cs[threadIdx.x] = 0;
__syncthreads();
//initialize output vector for each block
if(threadIdx.x==0){
C[gridIndex*gridDim.x+blockIdx.x]=0;
}
__syncthreads();
// if nB > gridDim???????
//idx < (gridIndex*gridDim.x+mA%VECTOR_BLOCK_SIZE)*()
for(int a = aBegin; (a < aEnd)&&((gridIndex*gridDim.x+blockIdx.x)<aEnd); a += aStep, gridIndex++){
//initialize output vector for each block
if(threadIdx.x==0){
C[gridIndex*gridDim.x+blockIdx.x]=0;
}
__syncthreads();
//following is operations within one block
// initialize the dot product for each row in A and vector B
t_ve blocksum = 0;
//if nB> blockDim, split repeat the
for(int b = bBegin; (b < bEnd)&&((threadIdx.x+b) < bEnd); b += bStep ) {
//initialise Cs
Cs[threadIdx.x] = 0;
__syncthreads();
// compute scalar product
if (( (gridIndex*gridDim.x+blockIdx.x)<aEnd)&&((b+threadIdx.x) < bEnd)) {
//Cs[threadIdx.x] = A[a + blockIdx.x ][b + threadIdx.x] * B[b + threadIdx.x ];
Cs[threadIdx.x] = A[(a + blockIdx.x)* nB+b + threadIdx.x] * B[b + threadIdx.x ];
}
__syncthreads();
if(threadIdx.x == 0){
//30.Nov.2009 fixeded for Cs summe
int kEnd = bEnd-b;
if(kEnd > VECTOR_BLOCK_SIZE)kEnd = VECTOR_BLOCK_SIZE;
//Because I add Cs[0...k], if blockSize and Matrix does not fit, Parts of Cs[k] are not initialized as 0.
for (int k = 0; k < kEnd; k++) blocksum += Cs[k];
}
__syncthreads();
//Cs[threadIdx.x] = 0;
//__syncthreads();
}//for b
__syncthreads();
if(threadIdx.x == 0) C[gridIndex*gridDim.x+blockIdx.x] = blocksum;
__syncthreads();
// summe all block, need test for mA bigger than one Grid
//idx = gridIndex*gridDim.x + blockIdx.x*blockDim.x+threadIdx.x;
}
} | c00ade7375801d72a9e2199615d16db0f48848f1.cu | #include "cuda.h"
#include <stdio.h>
#include "projektcuda.h"
#include "project_comm.h"
/* Kernel to square elements of the array on the GPU */
/*
Matrix A is mA x nB , Vector B is nB
Vector C output vector in size of mA
C=A*B
description:
each row of A is processed by one block; if gridDim.x is smaller than the number of rows of A, blocks loop over the remaining rows in steps of gridDim.x
*/
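/*
   Illustrative host-side launch -- a sketch only, not part of the original project.
   It assumes d_A, d_B, d_C are device buffers of t_ve with sizes mA*nB, nB and mA,
   and that VECTOR_BLOCK_SIZE is the 512-thread block size from projektcuda.h:
       matrixMul<<< mA, VECTOR_BLOCK_SIZE >>>(d_C, d_A, d_B, mA, nB);
*/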
__global__ void matrixMul( t_ve* C, t_ve* A, t_ve* B, int mA, int nB)
{
//define a Result Vector for each block
    __shared__ t_ve Cs[VECTOR_BLOCK_SIZE]; // partial products; element type matches t_ve, and VECTOR_BLOCK_SIZE should equal blockDim.x (512)
//define gridIndex, if gridDim < mA, gridIndex > 0;
int gridIndex = 0;
// get a thread indentifier
//int idx = gridIndex*gridDim.x + blockIdx.x*blockDim.x+threadIdx.x;
int aBegin = 0;
int bBegin = 0;
int aStep = gridDim.x;
int bStep = VECTOR_BLOCK_SIZE; // blockDim.x
int aEnd = mA;
int bEnd = nB;
//initialise Cs
Cs[threadIdx.x] = 0;
__syncthreads();
//initialize output vector for each block
if(threadIdx.x==0){
C[gridIndex*gridDim.x+blockIdx.x]=0;
}
__syncthreads();
// if nB > gridDim???????
//idx < (gridIndex*gridDim.x+mA%VECTOR_BLOCK_SIZE)*()
for(int a = aBegin; (a < aEnd)&&((gridIndex*gridDim.x+blockIdx.x)<aEnd); a += aStep, gridIndex++){
//initialize output vector for each block
if(threadIdx.x==0){
C[gridIndex*gridDim.x+blockIdx.x]=0;
}
__syncthreads();
//following is operations within one block
// initialize the dot product for each row in A and vector B
t_ve blocksum = 0;
//if nB> blockDim, split repeat the
for(int b = bBegin; (b < bEnd)&&((threadIdx.x+b) < bEnd); b += bStep ) {
//initialise Cs
Cs[threadIdx.x] = 0;
__syncthreads();
// compute scalar product
if (( (gridIndex*gridDim.x+blockIdx.x)<aEnd)&&((b+threadIdx.x) < bEnd)) {
//Cs[threadIdx.x] = A[a + blockIdx.x ][b + threadIdx.x] * B[b + threadIdx.x ];
Cs[threadIdx.x] = A[(a + blockIdx.x)* nB+b + threadIdx.x] * B[b + threadIdx.x ];
}
__syncthreads();
if(threadIdx.x == 0){
//30.Nov.2009 fixeded for Cs summe
int kEnd = bEnd-b;
if(kEnd > VECTOR_BLOCK_SIZE)kEnd = VECTOR_BLOCK_SIZE;
//Because I add Cs[0...k], if blockSize and Matrix does not fit, Parts of Cs[k] are not initialized as 0.
for (int k = 0; k < kEnd; k++) blocksum += Cs[k];
}
__syncthreads();
//Cs[threadIdx.x] = 0;
//__syncthreads();
}//for b
__syncthreads();
if(threadIdx.x == 0) C[gridIndex*gridDim.x+blockIdx.x] = blocksum;
__syncthreads();
// summe all block, need test for mA bigger than one Grid
//idx = gridIndex*gridDim.x + blockIdx.x*blockDim.x+threadIdx.x;
}
} |
2145bc4213d364d5270f7b8ecd63812422377146.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#define CSC(call) \
do { \
hipError_t res = call; \
if (res != hipSuccess) { \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, hipGetErrorString(res)); \
exit(0); \
} \
} while(0)
__global__ void subtract_row(double *matrix, int n, int column) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int idy = threadIdx.y + blockDim.y * blockIdx.y;
int offsetx = blockDim.x * gridDim.x;
int offsety = blockDim.y * gridDim.y;
int i, j;
double coeff;
double divisor = matrix[column * n + column];
for (i = 1 + column + idx; i < n; i += offsetx) {
coeff = matrix[column * n + i] / divisor;
for (j = 1 + column + idy; j < n + 1; j += offsety) {
matrix[j * n + i] -= coeff * matrix[j * n + column];
}
}
}
__global__ void reverse_subtract_row(double *matrix, int n, int column) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int offsetx = blockDim.x * gridDim.x;
int i;
double coeff;
double divisor = matrix[column * n + column];
for (i = idx; i < column; i += offsetx) {
coeff = matrix[column * n + i] / divisor;
matrix[n * n + i] -= coeff * matrix[n * n + column];
}
}
__global__ void normalize(double *matrix, int n) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int offsetx = blockDim.x * gridDim.x;
int i;
for (i = idx; i < n; i += offsetx) {
matrix[n * n + i] /= matrix[i * n + i];
}
}
__global__ void swap_rows(double *matrix, int n, int column, int max_row) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int offsetx = blockDim.x * gridDim.x;
int i;
double tmp;
for (i = idx + column; i < n + 1; i+= offsetx) {
tmp = matrix[i * n + column];
matrix[i * n + column] = matrix[i * n + max_row];
matrix[i * n + max_row] = tmp;
}
}
struct compare {
__host__ __device__ bool operator ()(double lhs, double rhs) {
return fabs(lhs) < fabs(rhs);
}
};
void solve(double *matrix, int n) {
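    // The augmented matrix is stored column-major: column j starts at matrix + j*n and
    // column n holds the right-hand side. Forward elimination picks the pivot row with
    // thrust::max_element on the current column (partial pivoting), swaps it into place
    // and eliminates below it; the reverse pass plus normalize() complete back substitution.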
for (int column = 0; column < n; ++column) {
thrust::device_ptr<double> thrust_matrix =
thrust::device_pointer_cast(matrix) + n * column;
int max_row = thrust::max_element(thrust_matrix + column,
thrust_matrix + n, compare()) - thrust_matrix;
if (max_row >= n) {
continue;
}
hipLaunchKernelGGL(( swap_rows), dim3(32), dim3(32), 0, 0, matrix, n, column, max_row);
hipLaunchKernelGGL(( subtract_row), dim3(dim3(32, 32)), dim3(dim3(32, 32)), 0, 0, matrix, n, column);
}
for (int column = n - 1; column >= 0; --column) {
hipLaunchKernelGGL(( reverse_subtract_row), dim3(32), dim3(32), 0, 0, matrix, n, column);
}
hipLaunchKernelGGL(( normalize), dim3(32), dim3(32), 0, 0, matrix, n);
}
int main() {
int n;
scanf("%d", &n);
double *matrix = (double *) malloc(sizeof(double *) * (n + 1) * n);
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
scanf("%lf", matrix + j * n + i);
}
}
for (int i = 0; i < n; ++i) {
scanf("%lf", matrix + n * n + i);
}
double *device_matrix;
CSC(hipMalloc(&device_matrix, sizeof(double) * (n + 1) * n));
CSC(hipMemcpy(device_matrix, matrix, sizeof(double) * (n + 1) * n, hipMemcpyHostToDevice));
solve(device_matrix, n);
CSC(hipMemcpy(matrix + n * n, device_matrix + n * n, sizeof(double) * n, hipMemcpyDeviceToHost));
for (int i = 0; i < n; ++i) {
printf("%.10e ", matrix[n * n + i]);
}
printf("\n");
CSC(hipFree(device_matrix));
free(matrix);
return 0;
}
| 2145bc4213d364d5270f7b8ecd63812422377146.cu | #include <stdio.h>
#include <math.h>
#include <float.h>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while(0)
__global__ void subtract_row(double *matrix, int n, int column) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int idy = threadIdx.y + blockDim.y * blockIdx.y;
int offsetx = blockDim.x * gridDim.x;
int offsety = blockDim.y * gridDim.y;
int i, j;
double coeff;
double divisor = matrix[column * n + column];
for (i = 1 + column + idx; i < n; i += offsetx) {
coeff = matrix[column * n + i] / divisor;
for (j = 1 + column + idy; j < n + 1; j += offsety) {
matrix[j * n + i] -= coeff * matrix[j * n + column];
}
}
}
__global__ void reverse_subtract_row(double *matrix, int n, int column) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int offsetx = blockDim.x * gridDim.x;
int i;
double coeff;
double divisor = matrix[column * n + column];
for (i = idx; i < column; i += offsetx) {
coeff = matrix[column * n + i] / divisor;
matrix[n * n + i] -= coeff * matrix[n * n + column];
}
}
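// Divides each right-hand-side entry (column n of the column-major augmented
// matrix) by its diagonal element, yielding the solution vector in place.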
__global__ void normalize(double *matrix, int n) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int offsetx = blockDim.x * gridDim.x;
int i;
for (i = idx; i < n; i += offsetx) {
matrix[n * n + i] /= matrix[i * n + i];
}
}
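// Swaps rows `column` and `max_row` of the augmented matrix, starting at
// column `column`; earlier columns are already zero below the diagonal.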
__global__ void swap_rows(double *matrix, int n, int column, int max_row) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int offsetx = blockDim.x * gridDim.x;
int i;
double tmp;
for (i = idx + column; i < n + 1; i+= offsetx) {
tmp = matrix[i * n + column];
matrix[i * n + column] = matrix[i * n + max_row];
matrix[i * n + max_row] = tmp;
}
}
struct compare {
__host__ __device__ bool operator ()(double lhs, double rhs) {
return fabs(lhs) < fabs(rhs);
}
};
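// Host driver: Gaussian elimination with partial pivoting. For each column the
// pivot row is the entry of largest absolute value (found on the device with
// thrust::max_element), rows are swapped and the rows below the pivot are
// eliminated; back substitution and normalization then leave the solution in
// the augmented column.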
void solve(double *matrix, int n) {
for (int column = 0; column < n; ++column) {
thrust::device_ptr<double> thrust_matrix =
thrust::device_pointer_cast(matrix) + n * column;
int max_row = thrust::max_element(thrust_matrix + column,
thrust_matrix + n, compare()) - thrust_matrix;
if (max_row >= n) {
continue;
}
swap_rows<<<32, 32>>>(matrix, n, column, max_row);
subtract_row<<<dim3(32, 32), dim3(32, 32)>>>(matrix, n, column);
}
for (int column = n - 1; column >= 0; --column) {
reverse_subtract_row<<<32, 32>>>(matrix, n, column);
}
normalize<<<32, 32>>>(matrix, n);
}
int main() {
int n;
scanf("%d", &n);
double *matrix = (double *) malloc(sizeof(double) * (n + 1) * n);
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
scanf("%lf", matrix + j * n + i);
}
}
for (int i = 0; i < n; ++i) {
scanf("%lf", matrix + n * n + i);
}
double *device_matrix;
CSC(cudaMalloc(&device_matrix, sizeof(double) * (n + 1) * n));
CSC(cudaMemcpy(device_matrix, matrix, sizeof(double) * (n + 1) * n, cudaMemcpyHostToDevice));
solve(device_matrix, n);
CSC(cudaMemcpy(matrix + n * n, device_matrix + n * n, sizeof(double) * n, cudaMemcpyDeviceToHost));
for (int i = 0; i < n; ++i) {
printf("%.10e ", matrix[n * n + i]);
}
printf("\n");
CSC(cudaFree(device_matrix));
free(matrix);
return 0;
}
|
562a83784e429dd436d34161c1bfba8ac87fc9de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <array>
#include <cassert>
#include <cmath>
#include <iomanip>
#include <iostream>
#include <random>
#include <sstream>
#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/videoio.hpp"
#include "shapes.hpp"
#include "moving_object.hpp"
#include "draw_shapes.cuh"
int constexpr FRAME_WIDTH = 960;
int constexpr FRAME_HEIGHT = 600;
void cudaCheckSuccess(hipError_t const cuda_status, std::string const& message)
{
if(hipSuccess != cuda_status) {
std::cout << "CUDA ERROR " << cuda_status << ": " << message << std::endl;
std::cout << "- " << hipGetErrorName(cuda_status) << ": " << hipGetErrorString(cuda_status) << std::endl;
}
}
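// Derives a kernel launch configuration from a shape's bounding box:
// fixed 32x32 thread blocks and a grid just large enough to cover the box.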
struct CudaThreadConfig
{
explicit CudaThreadConfig(cv::Size2i const& bounding_box_extents)
: bb_size(bounding_box_extents)
{
int const w = bb_size.width;
int const h = bb_size.height;
int const block_width = 32;
int const block_height = 32;
int const grid_width = (w + 31) / 32;
int const grid_height = (h + 31) / 32;
block.x = block_width;
block.y = block_height;
block.z = 1;
grid.x = grid_width;
grid.y = grid_height;
grid.z = 1;
}
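// Fraction of launched threads that fall outside the bounding box, i.e. the
// waste introduced by rounding the box up to whole 32x32 blocks.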
float fragmentation() const {
int const bb_area = bb_size.width * bb_size.height;
int const grid_area = (grid.x * block.x) * (grid.y * block.y);
float const result = 1.0f - float(bb_area) / float(grid_area);
return result;
}
cv::Size2i bb_size;
dim3 grid;
dim3 block;
};
std::ostream& operator << (std::ostream& o, dim3 const& d) {
o << '(' << d.x << ',' << d.y << ')';
return o;
}
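// Renders num_frames frames on the GPU: clears the frame buffer, draws each
// circle with its own kernel launch, advances the moving objects, then copies
// the frame back to the host and writes it as a numbered PNG.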
void generateFrames(int const num_frames,
std::vector<MovingObject>& circles,
std::string const& prefix)
{
cv::Mat frame(FRAME_HEIGHT, FRAME_WIDTH, CV_8UC3);
int const frame_num_bytes = FRAME_HEIGHT * FRAME_WIDTH * 3;
hipError_t cuda_status = hipSuccess;
// CUDA stream
hipStream_t cuda_stream;
cuda_status = hipStreamCreate(&cuda_stream);
cudaCheckSuccess(cuda_status, "Error creating CUDA stream");
// Allocate frame on device
uchar3 *dev_frame = nullptr;
cuda_status = hipMalloc((void**) &dev_frame, frame_num_bytes);
cudaCheckSuccess(cuda_status, "Error allocating frame in device");
for(int i_frame = 0; i_frame < num_frames; ++i_frame) {
std::string const frame_string = "frame " + std::to_string(i_frame);
std::cout << "Drawing " << frame_string << '\n';
// Clear frame
dim3 clear_block(16,16,1);
dim3 clear_grid((FRAME_WIDTH+15)/16, (FRAME_HEIGHT+15)/16, 1);
hipLaunchKernelGGL(( clearFrame), dim3(clear_grid), dim3(clear_block), 0, cuda_stream, dev_frame, FRAME_WIDTH, FRAME_HEIGHT);
cuda_status = hipGetLastError();
cudaCheckSuccess(cuda_status, "Error launching 'clear' kernel in frame " + std::to_string(i_frame));
// Draw circles
int circle_counter = 0;
for(auto &obj : circles) {
cv::Rect2i const bb = obj.boundingBox();
uchar3 const color{obj.shape().color()[0], obj.shape().color()[1], obj.shape().color()[2]};
cv::Point2i const position = obj.position();
int const radius = bb.size().width / 2;
CudaThreadConfig const cuda_thread_config(bb.size());
std::cout << "- Circle " << circle_counter
<< " bb = " << cuda_thread_config.bb_size
<< " grid = " << cuda_thread_config.grid
<< " block = " << cuda_thread_config.block
<< " fragmentation = " << cuda_thread_config.fragmentation()
<< '\n';
hipLaunchKernelGGL(( drawCircle), dim3(cuda_thread_config.grid), dim3(cuda_thread_config.block), 0, cuda_stream, position.x, position.y, radius, color, dev_frame, FRAME_WIDTH, FRAME_HEIGHT);
cuda_status = hipGetLastError();
cudaCheckSuccess(cuda_status, "Error launching kernel for circle " + std::to_string(circle_counter) + " in " + frame_string);
obj.update();
++circle_counter;
}
// Fetch frame from GPU and write to img file
cuda_status = hipMemcpy((void*) frame.data, (void*) dev_frame, frame_num_bytes, hipMemcpyDeviceToHost);
cudaCheckSuccess(cuda_status, "Error copying data out of GPU in " + frame_string);
std::ostringstream s;
s << prefix << '-' << std::setw(3) << std::setfill('0') << i_frame << ".png";
cv::imwrite(s.str(), frame);
}
hipFree((void*) dev_frame);
hipStreamDestroy(cuda_stream);
}
int main(int const argc, char const *argv[])
{
assert(argc == 3 && "Usage: video_frame_generator <frames> <prefix>-nnn.png");
int const num_frames = std::atoi(argv[1]);
std::vector<MovingObject> circles = generateRandomMovingCircles(25, FRAME_WIDTH, FRAME_HEIGHT);
generateFrames(num_frames, circles, argv[2]);
return 0;
}
| 562a83784e429dd436d34161c1bfba8ac87fc9de.cu | #include <array>
#include <cassert>
#include <cmath>
#include <iomanip>
#include <iostream>
#include <random>
#include <sstream>
#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/videoio.hpp"
#include "shapes.hpp"
#include "moving_object.hpp"
#include "draw_shapes.cuh"
int constexpr FRAME_WIDTH = 960;
int constexpr FRAME_HEIGHT = 600;
void cudaCheckSuccess(cudaError_t const cuda_status, std::string const& message)
{
if(cudaSuccess != cuda_status) {
std::cout << "CUDA ERROR " << cuda_status << ": " << message << std::endl;
std::cout << "- " << cudaGetErrorName(cuda_status) << ": " << cudaGetErrorString(cuda_status) << std::endl;
}
}
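// Derives a kernel launch configuration from a shape's bounding box:
// fixed 32x32 thread blocks and a grid just large enough to cover the box.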
struct CudaThreadConfig
{
explicit CudaThreadConfig(cv::Size2i const& bounding_box_extents)
: bb_size(bounding_box_extents)
{
int const w = bb_size.width;
int const h = bb_size.height;
int const block_width = 32;
int const block_height = 32;
int const grid_width = (w + 31) / 32;
int const grid_height = (h + 31) / 32;
block.x = block_width;
block.y = block_height;
block.z = 1;
grid.x = grid_width;
grid.y = grid_height;
grid.z = 1;
}
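// Fraction of launched threads that fall outside the bounding box, i.e. the
// waste introduced by rounding the box up to whole 32x32 blocks.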
float fragmentation() const {
int const bb_area = bb_size.width * bb_size.height;
int const grid_area = (grid.x * block.x) * (grid.y * block.y);
float const result = 1.0f - float(bb_area) / float(grid_area);
return result;
}
cv::Size2i bb_size;
dim3 grid;
dim3 block;
};
std::ostream& operator << (std::ostream& o, dim3 const& d) {
o << '(' << d.x << ',' << d.y << ')';
return o;
}
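// Renders num_frames frames on the GPU: clears the frame buffer, draws each
// circle with its own kernel launch, advances the moving objects, then copies
// the frame back to the host and writes it as a numbered PNG.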
void generateFrames(int const num_frames,
std::vector<MovingObject>& circles,
std::string const& prefix)
{
cv::Mat frame(FRAME_HEIGHT, FRAME_WIDTH, CV_8UC3);
int const frame_num_bytes = FRAME_HEIGHT * FRAME_WIDTH * 3;
cudaError_t cuda_status = cudaSuccess;
// CUDA stream
cudaStream_t cuda_stream;
cuda_status = cudaStreamCreate(&cuda_stream);
cudaCheckSuccess(cuda_status, "Error creating CUDA stream");
// Allocate frame on device
uchar3 *dev_frame = nullptr;
cuda_status = cudaMalloc((void**) &dev_frame, frame_num_bytes);
cudaCheckSuccess(cuda_status, "Error allocating frame in device");
for(int i_frame = 0; i_frame < num_frames; ++i_frame) {
std::string const frame_string = "frame " + std::to_string(i_frame);
std::cout << "Drawing " << frame_string << '\n';
// Clear frame
dim3 clear_block(16,16,1);
dim3 clear_grid((FRAME_WIDTH+15)/16, (FRAME_HEIGHT+15)/16, 1);
clearFrame<<<clear_grid, clear_block, 0, cuda_stream>>>(dev_frame, FRAME_WIDTH, FRAME_HEIGHT);
cuda_status = cudaGetLastError();
cudaCheckSuccess(cuda_status, "Error launching 'clear' kernel in frame " + std::to_string(i_frame));
// Draw circles
int circle_counter = 0;
for(auto &obj : circles) {
cv::Rect2i const bb = obj.boundingBox();
uchar3 const color{obj.shape().color()[0], obj.shape().color()[1], obj.shape().color()[2]};
cv::Point2i const position = obj.position();
int const radius = bb.size().width / 2;
CudaThreadConfig const cuda_thread_config(bb.size());
std::cout << "- Circle " << circle_counter
<< " bb = " << cuda_thread_config.bb_size
<< " grid = " << cuda_thread_config.grid
<< " block = " << cuda_thread_config.block
<< " fragmentation = " << cuda_thread_config.fragmentation()
<< '\n';
drawCircle<<<cuda_thread_config.grid, cuda_thread_config.block, 0, cuda_stream>>>(position.x, position.y, radius, color, dev_frame, FRAME_WIDTH, FRAME_HEIGHT);
cuda_status = cudaGetLastError();
cudaCheckSuccess(cuda_status, "Error launching kernel for circle " + std::to_string(circle_counter) + " in " + frame_string);
obj.update();
++circle_counter;
}
// Fetch frame from GPU and write to img file
cuda_status = cudaMemcpy((void*) frame.data, (void*) dev_frame, frame_num_bytes, cudaMemcpyDeviceToHost);
cudaCheckSuccess(cuda_status, "Error copying data out of GPU in " + frame_string);
std::ostringstream s;
s << prefix << '-' << std::setw(3) << std::setfill('0') << i_frame << ".png";
cv::imwrite(s.str(), frame);
}
cudaFree((void*) dev_frame);
cudaStreamDestroy(cuda_stream);
}
int main(int const argc, char const *argv[])
{
assert(argc == 3 && "Usage: video_frame_generator <frames> <prefix>-nnn.png");
int const num_frames = std::atoi(argv[1]);
std::vector<MovingObject> circles = generateRandomMovingCircles(25, FRAME_WIDTH, FRAME_HEIGHT);
generateFrames(num_frames, circles, argv[2]);
return 0;
}
|