hip_filename (string, length 5–84) | hip_content (string, length 79–9.69M) | cuda_filename (string, length 4–83) | cuda_content (string, length 19–9.69M)
---|---|---|---|
335b76f1828215b9a8d04eb660c5d9fee53836e5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in DAC'10
paper "An Effective GPU Implementation of Breadth-First Search"
Copyright (c) 2010 University of Illinois at Urbana-Champaign.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
Author: Lijiuan Luo ([email protected])
************************************************************************************/
#ifndef _KERNEL_H_
#define _KERNEL_H_
/**********
Define colors for BFS
1) the definition of white, gray and black comes from the textbook "Introduction to Algorithms"
2) For path search problems, people may choose to use different colors to record the found paths.
Therefore we reserve numbers (0-16677216) for this purpose. Only nodes with colors greater than
UP_LIMIT are free to visit
3) We define two gray shades to differentiate between the new frontier nodes and the old frontier nodes that
have not been marked BLACK
*************/
#define UP_LIMIT 16677216//2^24
#define WHITE 16677217
#define GRAY 16677218
#define GRAY0 16677219
#define GRAY1 16677220
#define BLACK 16677221
#include "config.h"
texture<Node> g_graph_node_ref;
texture<Edge> g_graph_edge_ref;
volatile __device__ int count = 0;
volatile __device__ int no_of_nodes_vol = 0;
volatile __device__ int stay_vol = 0;
/*****************************************************************************
This is the most general version of BFS kernel, i.e. no assumption about #block in the grid
\param q1: the array to hold the current frontier
\param q2: the array to hold the new frontier
\param g_graph_nodes: the nodes in the input graph
\param g_graph_edges: the edges in the input graph
\param g_color: the colors of nodes
\param g_cost: the costs of nodes
\param no_of_nodes: the number of nodes in the current frontier
\param tail: pointer to the location of the tail of the new frontier. *tail is the size of the new frontier
\param gray_shade: the shade of gray used in the current BFS propagation. See the GRAY0, GRAY1 macro definitions for more details
\param k: the level of current propagation in the BFS tree. k= 0 for the first propagation.
***********************************************************************/
__global__ void
BFS_kernel(int * q1,
int * q2,
Node* g_graph_nodes,
Edge* g_graph_edges,
int* g_color,
int * g_cost,
int no_of_nodes,
int * tail,
int gray_shade,
int k)
{
__shared__ int local_q_tail;//the tail of this block's local queue
__shared__ int local_q[NUM_BIN*W_QUEUE_SIZE];//the local queue of new frontier elements found by this block
//the offset of this block's local queue within the grid-level queue, a.k.a. the prefix sum
__shared__ int shift;
if(threadIdx.x == 0){
local_q_tail = 0;//initialize the tail of w-queue
}
__syncthreads();
//first, propagate and add the new frontier elements into w-queues
// int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if( tid<no_of_nodes)
{
int pid = q1[tid]; //the current frontier node, or the parent node of the new frontier nodes
g_color[pid] = BLACK;
int cur_cost = g_cost[pid];
//into
Node cur_node = tex1Dfetch(g_graph_node_ref,pid);
for(int i=cur_node.x; i<cur_node.y + cur_node.x; i++)//visit each neighbor of the
//current frontier node.
{
Edge cur_edge = tex1Dfetch(g_graph_edge_ref,i);
int id = cur_edge.x;
int cost = cur_edge.y;
cost += cur_cost;
int orig_cost = atomicMin(&g_cost[id],cost);
if(orig_cost > cost){//the node should be visited
if(g_color[id] > UP_LIMIT){
int old_color = atomicExch(&g_color[id],gray_shade);
//this guarantees that only one thread will push this node
//into a queue
if(old_color != gray_shade) {
//atomic operation guarantees the correctness
//even if multiple warps are executing simultaneously
int index = atomicAdd(&local_q_tail,1);
local_q[index] = id;
}
}
}
}
}
__syncthreads();
if(threadIdx.x == 0){
int tot_sum = local_q_tail;
//the offset or "shift" of the block-level queue within the grid-level queue
//is determined by atomic operation
shift = atomicAdd(tail,tot_sum);
}
__syncthreads();
int local_shift = threadIdx.x;//shift within a w-queue
//loop unrolling was originally used for better performance, but removed for better readability
while(local_shift < local_q_tail){
q2[shift + local_shift] = local_q[local_shift];
local_shift += blockDim.x;//multiple threads are copying elements at the same time,
//so we shift by multiple elements for next iteration
}
}
#endif
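For orientation, below is a minimal host-side sketch (not part of the original file) of how BFS_kernel might be driven one level per launch. The device buffers d_q1, d_q2, d_tail, d_color and d_cost are hypothetical names allocated elsewhere, MAX_THREADS_PER_BLOCK is assumed to come from config.h, and binding g_graph_node_ref / g_graph_edge_ref to the node and edge arrays is assumed to happen before the first launch.
#include <hip/hip_runtime.h>
#include <utility> // std::swap
void bfs_levels(int *d_q1, int *d_q2, Node *d_graph_nodes, Edge *d_graph_edges,
                int *d_color, int *d_cost, int *d_tail, int source_frontier_size)
{
    int no_of_nodes = source_frontier_size; // level 0: the frontier holds only the source node
    int gray_shade = GRAY0;
    for (int k = 0; no_of_nodes > 0; k++) {
        hipMemset(d_tail, 0, sizeof(int)); // the new frontier starts out empty
        int num_blocks = (no_of_nodes + MAX_THREADS_PER_BLOCK - 1) / MAX_THREADS_PER_BLOCK;
        hipLaunchKernelGGL(BFS_kernel, dim3(num_blocks), dim3(MAX_THREADS_PER_BLOCK), 0, 0,
                           d_q1, d_q2, d_graph_nodes, d_graph_edges, d_color, d_cost,
                           no_of_nodes, d_tail, gray_shade, k);
        hipMemcpy(&no_of_nodes, d_tail, sizeof(int), hipMemcpyDeviceToHost); // size of the new frontier
        std::swap(d_q1, d_q2);                              // the new frontier becomes the current one
        gray_shade = (gray_shade == GRAY0) ? GRAY1 : GRAY0; // alternate gray shades between levels
    }
}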
|
335b76f1828215b9a8d04eb660c5d9fee53836e5.cu
|
/***********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in DAC'10
paper "An Effective GPU Implementation of Breadth-First Search"
Copyright (c) 2010 University of Illinois at Urbana-Champaign.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
Author: Lijiuan Luo ([email protected])
************************************************************************************/
#ifndef _KERNEL_H_
#define _KERNEL_H_
/**********
Define colors for BFS
1) the definition of white, gray and black comes from the textbook "Introduction to Algorithms"
2) For path search problems, people may choose to use different colors to record the found paths.
Therefore we reserve numbers (0-16677216) for this purpose. Only nodes with colors greater than
UP_LIMIT are free to visit
3) We define two gray shades to differentiate between the new frontier nodes and the old frontier nodes that
have not been marked BLACK
*************/
#define UP_LIMIT 16677216//2^24
#define WHITE 16677217
#define GRAY 16677218
#define GRAY0 16677219
#define GRAY1 16677220
#define BLACK 16677221
#include "config.h"
texture<Node> g_graph_node_ref;
texture<Edge> g_graph_edge_ref;
volatile __device__ int count = 0;
volatile __device__ int no_of_nodes_vol = 0;
volatile __device__ int stay_vol = 0;
/*****************************************************************************
This is the most general version of BFS kernel, i.e. no assumption about #block in the grid
\param q1: the array to hold the current frontier
\param q2: the array to hold the new frontier
\param g_graph_nodes: the nodes in the input graph
\param g_graph_edges: the edges in the input graph
\param g_color: the colors of nodes
\param g_cost: the costs of nodes
\param no_of_nodes: the number of nodes in the current frontier
\param tail: pointer to the location of the tail of the new frontier. *tail is the size of the new frontier
\param gray_shade: the shade of gray used in the current BFS propagation. See the GRAY0, GRAY1 macro definitions for more details
\param k: the level of current propagation in the BFS tree. k= 0 for the first propagation.
***********************************************************************/
__global__ void
BFS_kernel(int * q1,
int * q2,
Node* g_graph_nodes,
Edge* g_graph_edges,
int* g_color,
int * g_cost,
int no_of_nodes,
int * tail,
int gray_shade,
int k)
{
__shared__ int local_q_tail;//the tail of this block's local queue
__shared__ int local_q[NUM_BIN*W_QUEUE_SIZE];//the local queue of new frontier elements found by this block
//the offset of this block's local queue within the grid-level queue, a.k.a. the prefix sum
__shared__ int shift;
if(threadIdx.x == 0){
local_q_tail = 0;//initialize the tail of w-queue
}
__syncthreads();
//first, propagate and add the new frontier elements into w-queues
// int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if( tid<no_of_nodes)
{
int pid = q1[tid]; //the current frontier node, or the parent node of the new frontier nodes
g_color[pid] = BLACK;
int cur_cost = g_cost[pid];
//into
Node cur_node = tex1Dfetch(g_graph_node_ref,pid);
for(int i=cur_node.x; i<cur_node.y + cur_node.x; i++)//visit each neighbor of the
//current frontier node.
{
Edge cur_edge = tex1Dfetch(g_graph_edge_ref,i);
int id = cur_edge.x;
int cost = cur_edge.y;
cost += cur_cost;
int orig_cost = atomicMin(&g_cost[id],cost);
if(orig_cost > cost){//the node should be visited
if(g_color[id] > UP_LIMIT){
int old_color = atomicExch(&g_color[id],gray_shade);
//this guarantees that only one thread will push this node
//into a queue
if(old_color != gray_shade) {
//atomic operation guarantees the correctness
//even if multiple warps are executing simultaneously
int index = atomicAdd(&local_q_tail,1);
local_q[index] = id;
}
}
}
}
}
__syncthreads();
if(threadIdx.x == 0){
int tot_sum = local_q_tail;
//the offset or "shift" of the block-level queue within the grid-level queue
//is determined by atomic operation
shift = atomicAdd(tail,tot_sum);
}
__syncthreads();
int local_shift = threadIdx.x;//shift within a w-queue
//loop unrolling was originally used for better performance, but removed for better readability
while(local_shift < local_q_tail){
q2[shift + local_shift] = local_q[local_shift];
local_shift += blockDim.x;//multiple threads are copying elements at the same time,
//so we shift by multiple elements for next iteration
}
}
#endif
|
45befa9b9b5c6509bdae3665d16cbb2cc7a7820a.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* GaussianMoveGenerator.cu
*
* Created on: 29 Jul 2019
* Author: pkua
*/
#include "GaussianMoveGenerator.h"
#if CUDA_HOST_COMPILATION
GaussianMoveGenerator::GaussianMoveGenerator(float sigma, float integrationStep, unsigned int seed,
size_t numberOfTrajectories)
: numberOfTrajectories{numberOfTrajectories}
{
this->randomGenerator.seed(seed);
// We need to divide sigma by sqrt(2), because if we sample x and y with sigma^2, then r is sampled from 2sigma^2
// And then take the integration step into account
this->normalDistribution = std::normal_distribution<float>(0.f, sigma * M_SQRT1_2 * std::sqrt(integrationStep));
}
Move GaussianMoveGenerator::generateMove() {
return {this->normalDistribution(this->randomGenerator), this->normalDistribution(this->randomGenerator)};
}
GaussianMoveGenerator::~GaussianMoveGenerator() {
}
#else // CUDA_DEVICE_COMPILATION
CUDA_HOSTDEV GaussianMoveGenerator::GaussianMoveGenerator(float sigma, float integrationStep, unsigned int seed,
size_t numberOfTrajectories)
: numberOfTrajectories{numberOfTrajectories}
{
// Divide sigma by sqrt(2), because if we sample x and y with sigma^2, then r is sampled from 2sigma^2
// After this, take the integration step into account
this->sigma = sigma * float{M_SQRT1_2} * sqrtf(integrationStep);
this->states = new hiprandState_t[this->numberOfTrajectories];
for (size_t i = 0; i < numberOfTrajectories; i++)
hiprand_init(seed, i, 0, &(this->states[i]));
}
CUDA_HOSTDEV GaussianMoveGenerator::~GaussianMoveGenerator() {
delete [] this->states;
}
CUDA_HOSTDEV Move GaussianMoveGenerator::generateMove() {
int i = CUDA_THREAD_IDX;
return {hiprand_normal(&(this->states[i])) * this->sigma, hiprand_normal(&(this->states[i])) * this->sigma};
}
#endif // Choice between cuda device and host compilation
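A quick check of the sqrt(2) factor discussed in the comments above (a note on the math, not part of the file): the constructor draws the x and y displacements independently with standard deviation sigma * sqrt(integrationStep) / sqrt(2), i.e. variance sigma^2 * integrationStep / 2 each, so the expected squared radial displacement E[x^2 + y^2] equals sigma^2 * integrationStep. Sampling x and y directly with standard deviation sigma * sqrt(integrationStep) would double that to 2 * sigma^2 * integrationStep, which is the effect the comment is correcting for.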
|
45befa9b9b5c6509bdae3665d16cbb2cc7a7820a.cu
|
/*
* GaussianMoveGenerator.cu
*
* Created on: 29 Jul 2019
* Author: pkua
*/
#include "GaussianMoveGenerator.h"
#if CUDA_HOST_COMPILATION
GaussianMoveGenerator::GaussianMoveGenerator(float sigma, float integrationStep, unsigned int seed,
size_t numberOfTrajectories)
: numberOfTrajectories{numberOfTrajectories}
{
this->randomGenerator.seed(seed);
// We need to divide sigma by sqrt(2), because if we sample x and y with sigma^2, then r is sampled from 2sigma^2
// And then take the integration step into account
this->normalDistribution = std::normal_distribution<float>(0.f, sigma * M_SQRT1_2 * std::sqrt(integrationStep));
}
Move GaussianMoveGenerator::generateMove() {
return {this->normalDistribution(this->randomGenerator), this->normalDistribution(this->randomGenerator)};
}
GaussianMoveGenerator::~GaussianMoveGenerator() {
}
#else // CUDA_DEVICE_COMPILATION
CUDA_HOSTDEV GaussianMoveGenerator::GaussianMoveGenerator(float sigma, float integrationStep, unsigned int seed,
size_t numberOfTrajectories)
: numberOfTrajectories{numberOfTrajectories}
{
// Divide sigma by sqrt(2), because if we sample x and y with sigma^2, then r is sampled from 2sigma^2
// After this, take the integration step into account
this->sigma = sigma * float{M_SQRT1_2} * sqrtf(integrationStep);
this->states = new curandState[this->numberOfTrajectories];
for (size_t i = 0; i < numberOfTrajectories; i++)
curand_init(seed, i, 0, &(this->states[i]));
}
CUDA_HOSTDEV GaussianMoveGenerator::~GaussianMoveGenerator() {
delete [] this->states;
}
CUDA_HOSTDEV Move GaussianMoveGenerator::generateMove() {
int i = CUDA_THREAD_IDX;
return {curand_normal(&(this->states[i])) * this->sigma, curand_normal(&(this->states[i])) * this->sigma};
}
#endif // Choice between cuda device and host compilation
|
6da8fa64e9d6434e57e236f1d008a5683c323a47.hip
|
// !!! This is a file automatically generated by hipify!!!
// File : YnLayerDropoutayerGpu.cu
// Brief : Implement methods.
// DD-MM-YYYY : 28-08-2016
// Author : haittt
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "../include/YnLayerDropoutGpu.h"
}
/**************** Define */
/**************** Macro */
/**************** Enum */
/**************** Struct */
/**************** Local variables */
/**************** Global variables */
/**************** Local Implement */
/**************** Implement */
YN_GPU_GLOBAL void _YnDropout(float *input,
int size,
float *rand,
float prob,
float scale)
{
int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (id < size)
input[id] = (rand[id] < prob) ? 0 : input[id] * scale;
}
YN_EXTERN_C
void YnLayerDropoutGpuForward(tYnLayer layer,
tYnNetworkState netState)
{
int size;
if (!netState.train)
return;
size = layer.inputs * layer.batch;
YnCudaRandomArray(layer.randGpu, size);
hipLaunchKernelGGL(( _YnDropout), dim3(YnCudaGridSize(size)), dim3(YN_GPU_NUM_THREADS_IN_BLOCK), 0, 0, netState.input, size, layer.randGpu, layer.probability, layer.scale);
YnCudaCheckError(hipPeekAtLastError());
}
YN_EXTERN_C
void YnLayerDropoutGpuBackward(tYnLayer layer,
tYnNetworkState netState)
{
int size;
if (!netState.delta)
return;
size = layer.inputs * layer.batch;
hipLaunchKernelGGL(( _YnDropout), dim3(YnCudaGridSize(size)), dim3(YN_GPU_NUM_THREADS_IN_BLOCK), 0, 0, netState.delta, size, layer.randGpu, layer.probability, layer.scale);
YnCudaCheckError(hipPeekAtLastError());
}
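In expectation terms (a note, not part of the file): _YnDropout zeroes element id when rand[id] < prob and multiplies it by scale otherwise, so E[output] = (1 - prob) * scale * input. If layer.scale is set to 1 / (1 - layer.probability) elsewhere in the library — an assumption here, following the usual inverted-dropout convention — the expected activation is unchanged by dropout and no extra scaling is needed at inference time.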
|
6da8fa64e9d6434e57e236f1d008a5683c323a47.cu
|
// File : YnLayerDropoutayerGpu.cu
// Brief : Implement methods.
// DD-MM-YYYY : 28-08-2016
// Author : haittt
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "../include/YnLayerDropoutGpu.h"
}
/**************** Define */
/**************** Macro */
/**************** Enum */
/**************** Struct */
/**************** Local variables */
/**************** Global variables */
/**************** Local Implement */
/**************** Implement */
YN_GPU_GLOBAL void _YnDropout(float *input,
int size,
float *rand,
float prob,
float scale)
{
int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (id < size)
input[id] = (rand[id] < prob) ? 0 : input[id] * scale;
}
YN_EXTERN_C
void YnLayerDropoutGpuForward(tYnLayer layer,
tYnNetworkState netState)
{
int size;
if (!netState.train)
return;
size = layer.inputs * layer.batch;
YnCudaRandomArray(layer.randGpu, size);
_YnDropout<<<YnCudaGridSize(size), YN_GPU_NUM_THREADS_IN_BLOCK>>>(netState.input, size, layer.randGpu, layer.probability, layer.scale);
YnCudaCheckError(cudaPeekAtLastError());
}
YN_EXTERN_C
void YnLayerDropoutGpuBackward(tYnLayer layer,
tYnNetworkState netState)
{
int size;
if (!netState.delta)
return;
size = layer.inputs * layer.batch;
_YnDropout<<<YnCudaGridSize(size), YN_GPU_NUM_THREADS_IN_BLOCK>>>(netState.delta, size, layer.randGpu, layer.probability, layer.scale);
YnCudaCheckError(cudaPeekAtLastError());
}
|
7956e9ada878c29c20b641ecd3dd52ac98a3fdb1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/adagrad_kernel.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/selected_rows_functor.h"
#include "paddle/phi/kernels/impl/adagrad_kernel_impl.h"
namespace phi {
template <typename T, int block_size>
__global__ void MergeGradKernel(const T* grad,
const int64_t* grad_rows,
T* grad_merge,
const int64_t* grad_merge_rows,
size_t grad_merge_rows_size,
int64_t row_numel) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
__shared__ size_t grad_merge_idx;
if (tid == 0) {
for (size_t i = 0; i < grad_merge_rows_size; i++) {
if (grad_rows[ty] == grad_merge_rows[i]) {
grad_merge_idx = i;
}
}
}
__syncthreads();
grad += ty * row_numel;
grad_merge += grad_merge_idx * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
paddle::platform::CudaAtomicAdd(grad_merge + index, grad[index]);
}
}
template <typename T, int block_size>
__global__ void SparseAdagradFunctorKernel(const T* grad,
const int64_t* rows,
const T* learning_rate,
T* param,
T* moment,
int64_t row_numel,
T epsilon) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
grad += ty * row_numel;
param += rows[ty] * row_numel;
moment += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we have to use
// Atomic Operation to avoid concurrent write error.
paddle::platform::CudaAtomicAdd(param + index,
-1.0 * learning_rate[0] * grad[index] /
(sqrt(moment[index]) + epsilon));
}
}
template <typename T>
struct SparseAdagradFunctor<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& grad,
const DenseTensor& learning_rate,
T epsilon,
DenseTensor* moment,
DenseTensor* param) {
// 1. g_m.rows = set(g.rows)
auto grad_width = grad.value().dims()[1];
phi::funcs::scatter::MergeAdd<phi::GPUContext, T> merge_func;
auto grad_merge = merge_func(context, grad);
auto* grad_merge_data = grad_merge.mutable_value()->template data<T>();
paddle::framework::Vector<int64_t> merge_rows(grad_merge.rows());
// 2. m += g_m * g_m
auto grad_square =
SquareSelectedRows<phi::GPUContext, T>(context, grad_merge);
phi::funcs::SelectedRowsAddToTensor<phi::GPUContext, T> functor;
functor(context, grad_square, moment);
// 3. update parameter
auto* lr = learning_rate.data<T>();
auto* param_data = param->data<T>();
auto* moment_data = moment->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid2(1, merge_rows.size());
paddle::framework::MixVector<int64_t> mixv_merge_rows(&merge_rows);
hipLaunchKernelGGL(( SparseAdagradFunctorKernel<T, 256>)
, dim3(grid2),
dim3(threads),
0,
reinterpret_cast<const phi::GPUContext&>(context).stream(),
grad_merge_data,
mixv_merge_rows.CUDAMutableData(context.GetPlace()),
lr,
param_data,
moment_data,
grad_width,
epsilon);
mixv_merge_rows.CopyToCPU();
}
};
template struct SparseAdagradFunctor<phi::GPUContext, float>;
template struct SparseAdagradFunctor<phi::GPUContext, double>;
} // namespace phi
PD_REGISTER_KERNEL(
adagrad, GPU, ALL_LAYOUT, phi::AdagradDenseKernel, float, double) {}
PD_REGISTER_KERNEL(adagrad_dense_param_sparse_grad,
GPU,
ALL_LAYOUT,
phi::AdagradSparseKernel,
float,
double) {}
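Read as update rules (a summary, not part of the file), the sparse path above performs the standard Adagrad step on just the rows touched by the gradient: MergeAdd plus MergeGradKernel first combine duplicate rows (grad_merge[row] += grad[row], with CudaAtomicAdd because several source rows can map to one merged row), step 2 accumulates moment += g_m * g_m element-wise, and SparseAdagradFunctorKernel then applies param -= learning_rate * g / (sqrt(moment) + epsilon), again atomically since a row index may appear more than once.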
|
7956e9ada878c29c20b641ecd3dd52ac98a3fdb1.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/adagrad_kernel.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/selected_rows_functor.h"
#include "paddle/phi/kernels/impl/adagrad_kernel_impl.h"
namespace phi {
template <typename T, int block_size>
__global__ void MergeGradKernel(const T* grad,
const int64_t* grad_rows,
T* grad_merge,
const int64_t* grad_merge_rows,
size_t grad_merge_rows_size,
int64_t row_numel) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
__shared__ size_t grad_merge_idx;
if (tid == 0) {
for (size_t i = 0; i < grad_merge_rows_size; i++) {
if (grad_rows[ty] == grad_merge_rows[i]) {
grad_merge_idx = i;
}
}
}
__syncthreads();
grad += ty * row_numel;
grad_merge += grad_merge_idx * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
paddle::platform::CudaAtomicAdd(grad_merge + index, grad[index]);
}
}
template <typename T, int block_size>
__global__ void SparseAdagradFunctorKernel(const T* grad,
const int64_t* rows,
const T* learning_rate,
T* param,
T* moment,
int64_t row_numel,
T epsilon) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
grad += ty * row_numel;
param += rows[ty] * row_numel;
moment += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we have to use
// Atomic Operation to avoid concurrent write error.
paddle::platform::CudaAtomicAdd(param + index,
-1.0 * learning_rate[0] * grad[index] /
(sqrt(moment[index]) + epsilon));
}
}
template <typename T>
struct SparseAdagradFunctor<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& grad,
const DenseTensor& learning_rate,
T epsilon,
DenseTensor* moment,
DenseTensor* param) {
// 1. g_m.rows = set(g.rows)
auto grad_width = grad.value().dims()[1];
phi::funcs::scatter::MergeAdd<phi::GPUContext, T> merge_func;
auto grad_merge = merge_func(context, grad);
auto* grad_merge_data = grad_merge.mutable_value()->template data<T>();
paddle::framework::Vector<int64_t> merge_rows(grad_merge.rows());
// 2. m += g_m * g_m
auto grad_square =
SquareSelectedRows<phi::GPUContext, T>(context, grad_merge);
phi::funcs::SelectedRowsAddToTensor<phi::GPUContext, T> functor;
functor(context, grad_square, moment);
// 3. update parameter
auto* lr = learning_rate.data<T>();
auto* param_data = param->data<T>();
auto* moment_data = moment->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid2(1, merge_rows.size());
paddle::framework::MixVector<int64_t> mixv_merge_rows(&merge_rows);
SparseAdagradFunctorKernel<T, 256>
<<<grid2,
threads,
0,
reinterpret_cast<const phi::GPUContext&>(context).stream()>>>(
grad_merge_data,
mixv_merge_rows.CUDAMutableData(context.GetPlace()),
lr,
param_data,
moment_data,
grad_width,
epsilon);
mixv_merge_rows.CopyToCPU();
}
};
template struct SparseAdagradFunctor<phi::GPUContext, float>;
template struct SparseAdagradFunctor<phi::GPUContext, double>;
} // namespace phi
PD_REGISTER_KERNEL(
adagrad, GPU, ALL_LAYOUT, phi::AdagradDenseKernel, float, double) {}
PD_REGISTER_KERNEL(adagrad_dense_param_sparse_grad,
GPU,
ALL_LAYOUT,
phi::AdagradSparseKernel,
float,
double) {}
|
460625844d4ef2d2922aaf05f84bcfd3f93e03fb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
 * Adds a bias to the given input matrix, broadcasting one bias value per group of PQ columns
 * @param input the input matrix allocated on the GPU
 * @param bias the bias vector allocated on the GPU (one entry per group of PQ columns)
 * @param ret the output matrix allocated on the GPU
 * @param rlen the number of rows of input
 * @param clen the number of columns of input
 * @param PQ the number of columns that share one bias entry
 */
extern "C"
__global__ void bias_add(double* input, double* bias, double* ret, int rlen, int clen, int PQ) {
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if(ix < rlen && iy < clen) {
int index = ix * clen + iy;
int biasIndex = iy / PQ;
ret[index] = input[index] + bias[biasIndex];
}
}
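A worked reading of the indexing above (a note, not part of the file): ret and input are rlen x clen row-major matrices, and biasIndex = iy / PQ groups the clen columns into blocks of PQ consecutive columns that share one bias entry. For example, with clen = 6 and PQ = 3, columns 0-2 of every row receive bias[0] and columns 3-5 receive bias[1].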
|
460625844d4ef2d2922aaf05f84bcfd3f93e03fb.cu
|
#include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
 * Adds a bias to the given input matrix, broadcasting one bias value per group of PQ columns
 * @param input the input matrix allocated on the GPU
 * @param bias the bias vector allocated on the GPU (one entry per group of PQ columns)
 * @param ret the output matrix allocated on the GPU
 * @param rlen the number of rows of input
 * @param clen the number of columns of input
 * @param PQ the number of columns that share one bias entry
 */
extern "C"
__global__ void bias_add(double* input, double* bias, double* ret, int rlen, int clen, int PQ) {
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if(ix < rlen && iy < clen) {
int index = ix * clen + iy;
int biasIndex = iy / PQ;
ret[index] = input[index] + bias[biasIndex];
}
}
|
7922fc1eed5a0186a1465b1cc388a5255516bc6f.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef PADDLE_WITH_CUDA
#include <hip/hip_runtime.h>
#endif
#ifdef PADDLE_WITH_HIP
#include <hip/hip_runtime.h>
#endif
#include <memory>
#include "glog/logging.h"
#include "gtest/gtest.h"
#include "paddle/phi/backends/context_pool.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/mixed_vector.h"
template <typename T>
using vec = phi::MixVector<T>;
using gpuStream_t = phi::gpuStream_t;
static __global__ void multiply_10(int* ptr) {
for (int i = 0; i < 10; ++i) {
ptr[i] *= 10;
}
}
gpuStream_t GetCUDAStream(phi::GPUPlace place) {
return reinterpret_cast<const phi::GPUContext*>(
phi::DeviceContextPool::Instance().Get(place))
->stream();
}
TEST(mixed_vector, GPU_VECTOR) {
std::vector<int> x;
for (int i = 0; i < 10; ++i) {
x.push_back(i);
}
vec<int> tmp(&x);
ASSERT_EQ(tmp.size(), 10UL);
phi::GPUPlace gpu(0);
#ifdef PADDLE_WITH_HIP
hipLaunchKernelGGL(multiply_10,
dim3(1),
dim3(1),
0,
GetCUDAStream(gpu),
tmp.MutableData(gpu));
#else
hipLaunchKernelGGL(( multiply_10), dim3(1), dim3(1), 0, GetCUDAStream(gpu), tmp.MutableData(gpu));
#endif
for (int i = 0; i < 10; ++i) {
ASSERT_EQ(tmp[i], i * 10);
}
}
TEST(mixed_vector, MultiGPU) {
if (phi::backends::gpu::GetGPUDeviceCount() < 2) {
LOG(WARNING) << "Skip mixed_vector.MultiGPU since there are not multiple "
"GPUs in your machine.";
return;
}
std::vector<int> x;
for (int i = 0; i < 10; ++i) {
x.push_back(i);
}
vec<int> tmp(&x);
ASSERT_EQ(tmp.size(), 10UL);
phi::GPUPlace gpu0(0);
phi::backends::gpu::SetDeviceId(0);
#ifdef PADDLE_WITH_HIP
hipLaunchKernelGGL(multiply_10,
dim3(1),
dim3(1),
0,
GetCUDAStream(gpu0),
tmp.MutableData(gpu0));
#else
hipLaunchKernelGGL(( multiply_10), dim3(1), dim3(1), 0, GetCUDAStream(gpu0), tmp.MutableData(gpu0));
#endif
phi::GPUPlace gpu1(1);
auto* gpu1_ptr = tmp.MutableData(gpu1);
phi::backends::gpu::SetDeviceId(1);
#ifdef PADDLE_WITH_HIP
hipLaunchKernelGGL(
multiply_10, dim3(1), dim3(1), 0, GetCUDAStream(gpu1), gpu1_ptr);
#else
hipLaunchKernelGGL(( multiply_10), dim3(1), dim3(1), 0, GetCUDAStream(gpu1), gpu1_ptr);
#endif
for (int i = 0; i < 10; ++i) {
ASSERT_EQ(tmp[i], i * 100);
}
}
|
7922fc1eed5a0186a1465b1cc388a5255516bc6f.cu
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef PADDLE_WITH_CUDA
#include <cuda_runtime.h>
#endif
#ifdef PADDLE_WITH_HIP
#include <hip/hip_runtime.h>
#endif
#include <memory>
#include "glog/logging.h"
#include "gtest/gtest.h"
#include "paddle/phi/backends/context_pool.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/mixed_vector.h"
template <typename T>
using vec = phi::MixVector<T>;
using gpuStream_t = phi::gpuStream_t;
static __global__ void multiply_10(int* ptr) {
for (int i = 0; i < 10; ++i) {
ptr[i] *= 10;
}
}
gpuStream_t GetCUDAStream(phi::GPUPlace place) {
return reinterpret_cast<const phi::GPUContext*>(
phi::DeviceContextPool::Instance().Get(place))
->stream();
}
TEST(mixed_vector, GPU_VECTOR) {
std::vector<int> x;
for (int i = 0; i < 10; ++i) {
x.push_back(i);
}
vec<int> tmp(&x);
ASSERT_EQ(tmp.size(), 10UL);
phi::GPUPlace gpu(0);
#ifdef PADDLE_WITH_HIP
hipLaunchKernelGGL(multiply_10,
dim3(1),
dim3(1),
0,
GetCUDAStream(gpu),
tmp.MutableData(gpu));
#else
multiply_10<<<1, 1, 0, GetCUDAStream(gpu)>>>(tmp.MutableData(gpu));
#endif
for (int i = 0; i < 10; ++i) {
ASSERT_EQ(tmp[i], i * 10);
}
}
TEST(mixed_vector, MultiGPU) {
if (phi::backends::gpu::GetGPUDeviceCount() < 2) {
LOG(WARNING) << "Skip mixed_vector.MultiGPU since there are not multiple "
"GPUs in your machine.";
return;
}
std::vector<int> x;
for (int i = 0; i < 10; ++i) {
x.push_back(i);
}
vec<int> tmp(&x);
ASSERT_EQ(tmp.size(), 10UL);
phi::GPUPlace gpu0(0);
phi::backends::gpu::SetDeviceId(0);
#ifdef PADDLE_WITH_HIP
hipLaunchKernelGGL(multiply_10,
dim3(1),
dim3(1),
0,
GetCUDAStream(gpu0),
tmp.MutableData(gpu0));
#else
multiply_10<<<1, 1, 0, GetCUDAStream(gpu0)>>>(tmp.MutableData(gpu0));
#endif
phi::GPUPlace gpu1(1);
auto* gpu1_ptr = tmp.MutableData(gpu1);
phi::backends::gpu::SetDeviceId(1);
#ifdef PADDLE_WITH_HIP
hipLaunchKernelGGL(
multiply_10, dim3(1), dim3(1), 0, GetCUDAStream(gpu1), gpu1_ptr);
#else
multiply_10<<<1, 1, 0, GetCUDAStream(gpu1)>>>(gpu1_ptr);
#endif
for (int i = 0; i < 10; ++i) {
ASSERT_EQ(tmp[i], i * 100);
}
}
|
2265e4bf5e730e6e6c0f9f00c75eb3bbcf0d355c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "VSIDS.cuh"
__device__ VSIDS::VSIDS(size_t n_vars)
: decay_factor { 2 }
, clauses_before_decaying { 50 }
, random_decision_frequency { 0.01f }
, n_vars { n_vars }
, vars { new vsids_var[n_vars] }
{
for (size_t i = 0; i < n_vars; i++) {
vars[i].free = true;
vars[i].negative_lit_sum = 0;
vars[i].positive_lit_sum = 0;
}
uint32_t seed = (uint32_t)clock();
initCurand(seed);
}
__device__ void VSIDS::initCurand(uint32_t seed)
{
hiprand_init(seed, 0, 0, &randState);
}
__device__ Var VSIDS::next_random_var()
{
Var v;
while (true) {
v = hiprand(&randState) % n_vars;
if (vars[v].free) {
break;
}
}
return v;
}
__device__ bool VSIDS::next_random_polarity()
{
return hiprand(&randState) % 2 == 0;
}
__device__ void VSIDS::decay()
{
for (int i = 0; i < n_vars; i++) {
vars[i].negative_lit_sum /= decay_factor;
vars[i].positive_lit_sum /= decay_factor;
}
}
__device__ void VSIDS::increment(Lit literal)
{
Var v = var(literal);
bool s = sign(literal);
if (s) {
vars[v].positive_lit_sum++;
}
else {
vars[v].negative_lit_sum++;
}
}
__device__ void VSIDS::free_var(Var v)
{
#ifdef USE_ASSERTIONS
assert(v >= 0 && v < n_vars);
#endif
vars[v].free = true;
}
__device__ void VSIDS::block_var(Var v)
{
#ifdef USE_ASSERTIONS
assert(v >= 0 && v < n_vars);
#endif
vars[v].free = false;
}
__device__ void VSIDS::handle_clause(Clause c)
{
for (int i = 0; i < c.n_lits; i++) {
Lit literal = c.literals[i];
increment(literal);
}
n_learnt_clauses++;
if (n_learnt_clauses % clauses_before_decaying == 0
&& n_learnt_clauses > 0) {
decay();
}
}
__device__ Lit VSIDS::next_random_literal()
{
return mkLit(next_random_var(), next_random_polarity());
}
__device__ Lit VSIDS::next_higher_literal()
{
Lit higher;
higher.x = -1;
int last_sum = -1;
for (size_t i = 0; i < n_vars; i++) {
vsids_var vvar = vars[i];
if (vvar.free) {
if (vvar.positive_lit_sum > last_sum) {
Lit l = mkLit(i, true);
last_sum = vvar.positive_lit_sum;
higher = l;
}
if (vvar.negative_lit_sum > last_sum) {
Lit l = mkLit(i, false);
last_sum = vvar.negative_lit_sum;
higher = l;
}
}
}
#ifdef USE_ASSERTIONS
assert(higher.x >= 0);
#endif
return higher;
}
__device__ Lit VSIDS::next_literal()
{
float prop = hiprand_uniform(&randState);
if (prop < random_decision_frequency) {
return next_random_literal();
}
//if (n_decisions % clauses_before_decaying == 0 && n_decisions > 0)
// decay();
n_decisions++;
return next_higher_literal();
}
__device__ bool VSIDS::is_free(Var v)
{
return vars[v].free;
}
__device__ void VSIDS::print()
{
printf("Evals[%d] = {", n_vars);
for (int i = 0; i < n_vars; i++) {
Lit lp = mkLit(i, true);
Lit ln = mkLit(i, false);
print_lit(lp);
printf("=%d ", vars[i].positive_lit_sum);
print_lit(ln);
printf("=%d(%s), ", vars[i].negative_lit_sum, vars[i].free ? "T" : "F");
}
printf("}\nDecisions = %d\n", n_decisions);
}
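Two small numeric notes on the heuristic above (not part of the file): decay() integer-divides every literal counter by decay_factor = 2 once every clauses_before_decaying = 50 learnt clauses, so a counter that stops being reinforced shrinks geometrically (roughly c / 2^k after 50 * k further clauses); and next_literal() makes a purely random decision with probability random_decision_frequency = 0.01, i.e. about one branching decision in a hundred, otherwise picking the free variable whose positive or negative literal sum is largest.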
|
2265e4bf5e730e6e6c0f9f00c75eb3bbcf0d355c.cu
|
#include "VSIDS.cuh"
__device__ VSIDS::VSIDS(size_t n_vars)
: decay_factor { 2 }
, clauses_before_decaying { 50 }
, random_decision_frequency { 0.01f }
, n_vars { n_vars }
, vars { new vsids_var[n_vars] }
{
for (size_t i = 0; i < n_vars; i++) {
vars[i].free = true;
vars[i].negative_lit_sum = 0;
vars[i].positive_lit_sum = 0;
}
uint32_t seed = (uint32_t)clock();
initCurand(seed);
}
__device__ void VSIDS::initCurand(uint32_t seed)
{
curand_init(seed, 0, 0, &randState);
}
__device__ Var VSIDS::next_random_var()
{
Var v;
while (true) {
v = curand(&randState) % n_vars;
if (vars[v].free) {
break;
}
}
return v;
}
__device__ bool VSIDS::next_random_polarity()
{
return curand(&randState) % 2 == 0;
}
__device__ void VSIDS::decay()
{
for (int i = 0; i < n_vars; i++) {
vars[i].negative_lit_sum /= decay_factor;
vars[i].positive_lit_sum /= decay_factor;
}
}
__device__ void VSIDS::increment(Lit literal)
{
Var v = var(literal);
bool s = sign(literal);
if (s) {
vars[v].positive_lit_sum++;
}
else {
vars[v].negative_lit_sum++;
}
}
__device__ void VSIDS::free_var(Var v)
{
#ifdef USE_ASSERTIONS
assert(v >= 0 && v < n_vars);
#endif
vars[v].free = true;
}
__device__ void VSIDS::block_var(Var v)
{
#ifdef USE_ASSERTIONS
assert(v >= 0 && v < n_vars);
#endif
vars[v].free = false;
}
__device__ void VSIDS::handle_clause(Clause c)
{
for (int i = 0; i < c.n_lits; i++) {
Lit literal = c.literals[i];
increment(literal);
}
n_learnt_clauses++;
if (n_learnt_clauses % clauses_before_decaying == 0
&& n_learnt_clauses > 0) {
decay();
}
}
__device__ Lit VSIDS::next_random_literal()
{
return mkLit(next_random_var(), next_random_polarity());
}
__device__ Lit VSIDS::next_higher_literal()
{
Lit higher;
higher.x = -1;
int last_sum = -1;
for (size_t i = 0; i < n_vars; i++) {
vsids_var vvar = vars[i];
if (vvar.free) {
if (vvar.positive_lit_sum > last_sum) {
Lit l = mkLit(i, true);
last_sum = vvar.positive_lit_sum;
higher = l;
}
if (vvar.negative_lit_sum > last_sum) {
Lit l = mkLit(i, false);
last_sum = vvar.negative_lit_sum;
higher = l;
}
}
}
#ifdef USE_ASSERTIONS
assert(higher.x >= 0);
#endif
return higher;
}
__device__ Lit VSIDS::next_literal()
{
float prop = curand_uniform(&randState);
if (prop < random_decision_frequency) {
return next_random_literal();
}
//if (n_decisions % clauses_before_decaying == 0 && n_decisions > 0)
// decay();
n_decisions++;
return next_higher_literal();
}
__device__ bool VSIDS::is_free(Var v)
{
return vars[v].free;
}
__device__ void VSIDS::print()
{
printf("Evals[%d] = {", n_vars);
for (int i = 0; i < n_vars; i++) {
Lit lp = mkLit(i, true);
Lit ln = mkLit(i, false);
print_lit(lp);
printf("=%d ", vars[i].positive_lit_sum);
print_lit(ln);
printf("=%d(%s), ", vars[i].negative_lit_sum, vars[i].free ? "T" : "F");
}
printf("}\nDecisions = %d\n", n_decisions);
}
|
7e34baeb43ce10c785a54cf808d2756b405e106d.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2011-2013 Gerhard Reitmayr, TU Graz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "kfusion.h"
#include "helpers.h"
#include <iostream>
#include <fstream>
#include <sstream>
#include <iomanip>
#include <cstring>
#include <dirent.h>
#include <cerrno>
#include <cmath>
#include <png++/png.hpp>
#include <jpeglib.h>
using namespace std;
using namespace TooN;
KFusion kfusion;
Image<uint16_t, HostDevice> depthImage;
SE3<float> initPose;
Matrix4 second_pose;
float size;
bool stop_run = false;
/*============================================================================*/
Image<uint16_t, HostDevice> fusedDepth;
////////////////////////////////////////////////////////////////////////////////
// global parameter
int param_start_index = -1;
int param_volume_size = 640;
float param_volume_dimension = 4.f;
int param_frame_threshold = 11;
float param_angle_factor = 1.f;
float param_translation_factor = 1.f;
float param_rsme_threshold = 1.5e-2f;
int param_file_name_length = 24;
int param_time_stamp_pose = 8;
int param_time_stamp_length = 12;
enum KinfuMode {KINFU_FORWARD, KINFU_BACKWARD};
KinfuMode param_mode = KINFU_FORWARD;
// voxel resolution: 0.01 meter
////////////////////////////////////////////////////////////////////////////////
int file_index;
float angle_threshold, translation_threshold;
const int kImageRows = 480;
const int kImageCols = 640;
const int kImageChannels = 3;
vector<string> image_list;
vector<string> depth_list;
vector<string> extrinsic_list;
#ifdef INITIAL_POSE
vector<Matrix4> extrinsic_poses;
#endif
string data_dir, image_dir, depth_dir, fused_dir, extrinsic_dir;
////////////////////////////////////////////////////////////////////////////////
void GetFileNames(const string dir, vector<string> *file_list) {
DIR *dp;
struct dirent *dirp;
if((dp = opendir(dir.c_str())) == NULL) {
cout << "Error(" << errno << ") opening " << dir << endl;
}
while ((dirp = readdir(dp)) != NULL) {
file_list->push_back(dir + string(dirp->d_name));
}
closedir(dp);
sort( file_list->begin(), file_list->end() );
file_list->erase(file_list->begin()); //.
file_list->erase(file_list->begin()); //..
}
////////////////////////////////////////////////////////////////////////////////
bool GetDepthData(string file_name, uint16_t *data) {
png::image< png::gray_pixel_16 > img(file_name.c_str(),
png::require_color_space< png::gray_pixel_16 >());
int index = 0;
for (int i = 0; i < kImageRows; ++i) {
for (int j = 0; j < kImageCols; ++j) {
uint16_t s = img.get_pixel(j, i);
*(data + index) = (s << 13 | s >> 3);
++index;
}
}
return true;
}
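A note on the bit twiddling above (not part of the file): for a 16-bit sample s, (s << 13 | s >> 3) is a rotation of the bits by 3 positions, so reading recovers the depth value from the rotated form stored in the PNG, and SaveFusedDepthFile applies the inverse rotation (s >> 13 | s << 3) when writing the fused depth back out. This appears to follow the sun3d depth encoding referenced by the run command in the closing comment of the file.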
////////////////////////////////////////////////////////////////////////////////
void SaveFusedDepthFile() {
string depth_full_name = depth_list[param_start_index];
string depth_serial_name = depth_full_name.substr(
depth_full_name.size() - param_file_name_length, param_file_name_length);
string fused_full_name = fused_dir + depth_serial_name;
#ifdef RESOLUTION_1280X960
png::image<png::gray_pixel_16> img(kImageCols * 2, kImageRows * 2);
kfusion.Raycast_2();
renderFusedMap(fusedDepth.getDeviceImage(), kfusion.vertex_2);
for (int i = 0; i < kImageRows * 2; ++i) {
for (int j = 0; j < kImageCols * 2; ++j) {
uint16_t s = fusedDepth[make_uint2(j,i)];
img[i][j] = (s >> 13 | s << 3);
}
}
#else
png::image<png::gray_pixel_16> img(kImageCols, kImageRows);
renderFusedMap(fusedDepth.getDeviceImage(), kfusion.vertex);
hipDeviceSynchronize();
for (int i = 0; i < kImageRows; ++i) {
for (int j = 0; j < kImageCols; ++j) {
uint16_t s = fusedDepth[make_uint2(j,i)];
img[i][j] = (s >> 13 | s << 3);
}
}
#endif
img.write(fused_full_name.c_str());
#if 0
string pose_txt_name = data_dir + "poseTSDF.txt";
ofstream pose_file;
pose_file.open(pose_txt_name.c_str(), fstream::app);
pose_file.precision(60);
for (int i = 0; i < 3; ++i) {
pose_file << second_pose.data[i].x << "\t";
pose_file << second_pose.data[i].y << "\t";
pose_file << second_pose.data[i].z << "\t";
pose_file << second_pose.data[i].w << "\n";
}
pose_file.close();
#endif
}
////////////////////////////////////////////////////////////////////////////////
bool GetExtrinsicData(string file_name, vector<Matrix4> *poses) {
FILE *fp = fopen(file_name.c_str(), "r");
for (int i = 0; i < image_list.size(); ++i) {
Matrix4 m;
for (int d = 0; d < 3; ++d) {
if (fscanf(fp, "%f", &m.data[d].x));
if (fscanf(fp, "%f", &m.data[d].y));
if (fscanf(fp, "%f", &m.data[d].z));
if (fscanf(fp, "%f", &m.data[d].w));
}
m.data[3].x = m.data[3].y = m.data[3].z = 0.f;
m.data[3].w = 1.f;
poses->push_back(m);
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
bool GetImageData(string file_name, unsigned char *data) {
unsigned char *raw_image = NULL;
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
JSAMPROW row_pointer[1];
FILE *infile = fopen(file_name.c_str(), "rb");
unsigned long location = 0;
if (!infile) {
printf("Error opening jpeg file %s\n!", file_name.c_str());
return -1;
}
cinfo.err = jpeg_std_error(&jerr);
jpeg_create_decompress(&cinfo);
jpeg_stdio_src(&cinfo, infile);
jpeg_read_header(&cinfo, TRUE);
jpeg_start_decompress(&cinfo);
raw_image = (unsigned char*) malloc(
cinfo.output_width * cinfo.output_height * cinfo.num_components);
row_pointer[0] = (unsigned char *) malloc(
cinfo.output_width * cinfo.num_components);
while (cinfo.output_scanline < cinfo.image_height) {
jpeg_read_scanlines(&cinfo, row_pointer, 1);
for (uint i = 0; i < cinfo.image_width * cinfo.num_components; i++)
raw_image[location++] = row_pointer[0][i];
}
int index = 0;
for (uint i = 0; i < cinfo.image_height; ++i) {
for (uint j = 0; j < cinfo.image_width; ++j) {
for (int k = 0; k < kImageChannels; ++k) {
*(data + index) = raw_image[(i * cinfo.image_width * 3) + (j * 3) + k];
++index;
}
}
}
jpeg_finish_decompress(&cinfo);
jpeg_destroy_decompress(&cinfo);
free(row_pointer[0]);
fclose(infile);
return true;
}
////////////////////////////////////////////////////////////////////////////////
int GetTimeStamp(const string &file_name) {
return atoi(file_name.substr(
file_name.size() - param_file_name_length + param_time_stamp_pose,
param_time_stamp_length).c_str());
}
////////////////////////////////////////////////////////////////////////////////
void AssignDepthList(vector<string> image_list, vector<string> *depth_list) {
vector<string> depth_temp;
depth_temp.swap(*depth_list);
depth_list->clear();
depth_list->reserve(image_list.size());
int idx = 0;
int depth_time = GetTimeStamp(depth_temp[idx]);
int time_low = depth_time;
for (unsigned int i = 0; i < image_list.size(); ++i) {
int image_time = GetTimeStamp(image_list[i]);
while (depth_time < image_time) {
if (idx == depth_temp.size() - 1)
break;
time_low = depth_time;
depth_time = GetTimeStamp(depth_temp[++idx]);
}
if (idx == 0 && depth_time > image_time) {
depth_list->push_back(depth_temp[idx]);
continue;
}
if (abs(image_time - time_low) < abs(depth_time - image_time)) {
depth_list->push_back(depth_temp[idx-1]);
} else {
depth_list->push_back(depth_temp[idx]);
}
}
}
////////////////////////////////////////////////////////////////////////////////
void SystemCommand(const string str) {
if (system(str.c_str()))
return;
}
////////////////////////////////////////////////////////////////////////////////
void ReComputeSecondPose() {
if (param_start_index != depth_list.size() - 1) {
// kfusion.ResetWeight(0.f);
// GetDepthData(depth_list[param_start_index], (uint16_t *)depthImage.data());
// kfusion.setKinectDeviceDepth(depthImage.getDeviceImage());
// kfusion.setPose(toMatrix4(initPose));
// kfusion.Integrate();
// kfusion.Raycast();
// hipDeviceSynchronize();
Matrix4 delta = inverse(extrinsic_poses[param_start_index]) *
extrinsic_poses[param_start_index + 1];
kfusion.pose = kfusion.pose * delta;
GetDepthData(depth_list[param_start_index + 1],
(uint16_t *)depthImage.data());
kfusion.setKinectDeviceDepth(depthImage.getDeviceImage());
hipDeviceSynchronize();
kfusion.Track();
hipDeviceSynchronize();
second_pose = inverse(toMatrix4(initPose)) * kfusion.pose;
}
}
////////////////////////////////////////////////////////////////////////////////
void display(void){
static bool first_frame = true;
static bool integrate = true;
if (param_mode == KINFU_FORWARD) {
if (file_index == param_start_index + param_frame_threshold ||
file_index == image_list.size()) {
param_mode = KINFU_BACKWARD;
file_index = param_start_index - 1;
kfusion.setPose(toMatrix4(initPose));
kfusion.Raycast();
hipDeviceSynchronize();
cout << "IDX" << endl << endl;
return;
}
#ifdef INITIAL_POSE
// T_12 = T_01^(-1) * T_02
// T_02 = T_01 * T_12;
if (file_index > 0 && file_index != param_start_index) {
Matrix4 delta = inverse(extrinsic_poses[file_index - 1]) *
extrinsic_poses[file_index];
kfusion.pose = kfusion.pose * delta;
}
#endif
} else {
if (file_index == param_start_index - param_frame_threshold ||
file_index == -1) {
// kfusion.setPose(toMatrix4(initPose));
// kfusion.Raycast();
// hipDeviceSynchronize();
//
// ReComputeSecondPose();
kfusion.setPose(toMatrix4(initPose));
kfusion.Raycast();
hipDeviceSynchronize();
SaveFusedDepthFile();
cout << "IDX" << endl << endl;
exit(0);
}
#ifdef INITIAL_POSE
Matrix4 delta = inverse(extrinsic_poses[file_index + 1]) *
extrinsic_poses[file_index];
kfusion.pose = kfusion.pose * delta;
#endif
}
cout << file_index << " ";
cout.flush();
GetDepthData(depth_list[file_index], (uint16_t *)depthImage.data());
kfusion.setKinectDeviceDepth(depthImage.getDeviceImage());
/*----------------------------------------------------------------------------*/
#if 0
// // Just integrate and raycast first frame
// kfusion.Integrate();
// kfusion.Raycast();
// SaveFusedDepthFile();
// exit(0);
#endif
#if 1
// ICP off - actually on for integrate switch
// extrinsic on
Matrix4 temp = kfusion.pose;
integrate = kfusion.Track();
kfusion.pose = temp;
#else
// ICP on
integrate = kfusion.Track();
#endif
double z_angle;
Vector<3, float> diff_t;
diff_t[0] = diff_t[1] = diff_t[2] = 0.f;
if (file_index != param_start_index) {
float3 cam_z;
cam_z.x = cam_z.y = 0.f;
cam_z.z = 1.f;
float3 wor_z = kfusion.pose * cam_z;
z_angle = acos(wor_z.z);
float3 temp_t = kfusion.pose.get_translation();
Vector<3, float> curr_t;
curr_t[0] = temp_t.x;
curr_t[1] = temp_t.y;
curr_t[2] = temp_t.z;
Vector<3, float> init_t = initPose.get_translation();
diff_t = curr_t - init_t;
}
if ((!integrate && file_index != param_start_index) ||
z_angle > angle_threshold * param_angle_factor ||
norm(diff_t) > translation_threshold * param_translation_factor ) {
if (param_mode == KINFU_FORWARD) {
param_mode = KINFU_BACKWARD;
file_index = param_start_index - 1;
kfusion.setPose(toMatrix4(initPose));
kfusion.Raycast();
hipDeviceSynchronize();
cout << "THR" << endl << endl;
return;
} else {
// kfusion.setPose(toMatrix4(initPose));
// kfusion.Raycast();
// hipDeviceSynchronize();
//
// ReComputeSecondPose();
kfusion.setPose(toMatrix4(initPose));
kfusion.Raycast();
hipDeviceSynchronize();
SaveFusedDepthFile();
cout << "THR" << endl << endl;
#if 0
// volume saving
// string vol_fn = fused_dir + "volume.txt";
// FILE *fpv = fopen(vol_fn.c_str(), "w");
//
// uint vol_size = kfusion.integration.size.x *
// kfusion.integration.size.y *
// kfusion.integration.size.z * sizeof(short2);
//
// short2 *vol_data = (short2*) malloc(vol_size);
// hipMemcpy(vol_data, kfusion.integration.data, vol_size,
// hipMemcpyDeviceToHost);
//
// for (uint x = 0; x < kfusion.integration.size.x; ++x) {
// cout << x << endl;
// for (uint y = 0; y < kfusion.integration.size.y; ++y) {
// for (uint z = 0; z < kfusion.integration.size.z; ++z) {
// short2 data = vol_data[x +
// y * kfusion.integration.size.x +
// z * kfusion.integration.size.x * kfusion.integration.size.y];
// float2 dw = make_float2(data.x * 0.00003051944088f, data.y);
// fprintf(fpv, "%f %f ", dw.x, dw.y);
// }
// }
// }
//
// fclose(fpv);
#endif
exit(0);
}
}
if (param_mode == KINFU_FORWARD)
++file_index;
else
--file_index;
/*----------------------------------------------------------------------------*/
if(integrate || first_frame) {
kfusion.Integrate();
kfusion.Raycast();
first_frame = false;
}
hipDeviceSynchronize();
if(printCUDAError())
exit(1);
// usleep(1000 * 500);
}
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char ** argv) {
cout << "=================================================================" << endl;
string server_prefix, data_prefix, server_dir, data_name;
if (argc < 5) {
cout << "Wrong arguments ..." << endl;
exit(0);
} else {
server_prefix = argv[1];
data_prefix = argv[2];
data_name = argv[3];
param_start_index = atoi(argv[4]);
}
if (argc > 5)
param_frame_threshold = atoi(argv[5]);
if (argc > 6)
param_volume_size = atoi(argv[6]);
if (argc > 7)
param_volume_dimension = atof(argv[7]);
if (argc > 8)
param_angle_factor = atof(argv[8]);
if (argc > 9)
param_translation_factor = atof(argv[9]);
if (argc > 10)
param_rsme_threshold = atof(argv[10]);
server_dir = server_prefix + data_name;
image_dir = server_dir + "image/";
depth_dir = server_dir + "depth/";
extrinsic_dir = server_dir + "extrinsics/";
data_dir = data_prefix + data_name;
fused_dir = data_dir + "depthTSDF/";
#ifdef RESOLUTION_1280X960
fused_dir = data_dir + "depth1280x960/";
#endif
SystemCommand("mkdir -p " + fused_dir);
file_index = param_start_index;
size = param_volume_dimension;
GetFileNames(image_dir, &image_list);
GetFileNames(depth_dir, &depth_list);
GetFileNames(extrinsic_dir, &extrinsic_list);
AssignDepthList(image_list, &depth_list);
#ifdef INITIAL_POSE
string extrinsic_name = extrinsic_list[extrinsic_list.size() - 1];
// string extrinsic_name = extrinsic_list[1];
GetExtrinsicData(extrinsic_name, &extrinsic_poses);
cout << extrinsic_name << endl;
#endif
float fx, fy, cx, cy, ff;
string intrinsic = server_dir + "intrinsics.txt";
FILE *fp = fopen(intrinsic.c_str(), "r");
if (fscanf(fp, "%f", &fx));
if (fscanf(fp, "%f", &ff));
if (fscanf(fp, "%f", &cx));
if (fscanf(fp, "%f", &ff));
if (fscanf(fp, "%f", &fy));
if (fscanf(fp, "%f", &cy));
angle_threshold = (float) atan(cy / fy);
translation_threshold = 1.0f * cy / fy;
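// Note: cy / fy == tan(half the vertical field of view) when cy sits near the image center,
// so angle_threshold is roughly half the vertical FOV in radians and translation_threshold
// is the matching half-height of the view at 1 m depth; display() compares the pose drift
// against scaled versions of both before switching direction or stopping.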
/*----------------------------------------------------------------------------*/
KFusionConfig config;
config.volumeSize = make_uint3(param_volume_size);
// these are physical dimensions in meters
config.volumeDimensions = make_float3(size);
config.nearPlane = 0.4f;
config.farPlane = 5.0f;
config.mu = 0.1;
config.combinedTrackAndReduce = false;
uint2 input_size = make_uint2(kImageCols, kImageRows);
config.inputSize = input_size;
config.camera = make_float4(fx, fy, cx, cy);
config.rsme_threshold = param_rsme_threshold;
config.iterations[0] = 10;
config.iterations[1] = 5;
config.iterations[2] = 4;
initPose = SE3<float>(makeVector(size/2, size/2, 0, 0, 0, 0));
kfusion.Init(config);
// input buffers
depthImage.alloc(input_size);
// render buffers
if(printCUDAError()) {
hipDeviceReset();
return 1;
}
memset(depthImage.data(), 0, depthImage.size.x * depthImage.size.y * sizeof(uint16_t));
#ifdef RESOLUTION_1280X960
fusedDepth.alloc(input_size * 2);
#else
fusedDepth.alloc(input_size);
#endif
kfusion.setPose(toMatrix4(initPose));
while(1) {
display();
if(stop_run)
break;
}
return 0;
}
////////////////////////////////////////////////////////////////////////////////
// sh run_sh ~/data/sun3d/ ~/data/sun3d/ hotel_umd/maryland_hotel3/
// scp maryland_hotel3.tar.gz [email protected]:/home/alan/data/sun3d/hotel_umd/
|
7e34baeb43ce10c785a54cf808d2756b405e106d.cu
|
/*
Copyright (c) 2011-2013 Gerhard Reitmayr, TU Graz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "kfusion.h"
#include "helpers.h"
#include <iostream>
#include <fstream>
#include <sstream>
#include <iomanip>
#include <cstring>
#include <dirent.h>
#include <cerrno>
#include <cmath>
#include <png++/png.hpp>
#include <jpeglib.h>
using namespace std;
using namespace TooN;
KFusion kfusion;
Image<uint16_t, HostDevice> depthImage;
SE3<float> initPose;
Matrix4 second_pose;
float size;
bool stop_run = false;
/*============================================================================*/
Image<uint16_t, HostDevice> fusedDepth;
////////////////////////////////////////////////////////////////////////////////
// global parameter
int param_start_index = -1;
int param_volume_size = 640;
float param_volume_dimension = 4.f;
int param_frame_threshold = 11;
float param_angle_factor = 1.f;
float param_translation_factor = 1.f;
float param_rsme_threshold = 1.5e-2f;
int param_file_name_length = 24;
int param_time_stamp_pose = 8;
int param_time_stamp_length = 12;
enum KinfuMode {KINFU_FORWARD, KINFU_BACKWARD};
KinfuMode param_mode = KINFU_FORWARD;
// voxel resolution: 0.01 meter
////////////////////////////////////////////////////////////////////////////////
int file_index;
float angle_threshold, translation_threshold;
const int kImageRows = 480;
const int kImageCols = 640;
const int kImageChannels = 3;
vector<string> image_list;
vector<string> depth_list;
vector<string> extrinsic_list;
#ifdef INITIAL_POSE
vector<Matrix4> extrinsic_poses;
#endif
string data_dir, image_dir, depth_dir, fused_dir, extrinsic_dir;
////////////////////////////////////////////////////////////////////////////////
void GetFileNames(const string dir, vector<string> *file_list) {
DIR *dp;
struct dirent *dirp;
if((dp = opendir(dir.c_str())) == NULL) {
cout << "Error(" << errno << ") opening " << dir << endl;
return;  // bail out instead of calling readdir() on a NULL handle
}
while ((dirp = readdir(dp)) != NULL) {
file_list->push_back(dir + string(dirp->d_name));
}
closedir(dp);
sort( file_list->begin(), file_list->end() );
file_list->erase(file_list->begin()); //.
file_list->erase(file_list->begin()); //..
}
////////////////////////////////////////////////////////////////////////////////
bool GetDepthData(string file_name, uint16_t *data) {
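// Reads a 16-bit grayscale depth PNG and undoes the 3-bit rotation used by SUN3D depth maps ((s << 13) | (s >> 3)).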
png::image< png::gray_pixel_16 > img(file_name.c_str(),
png::require_color_space< png::gray_pixel_16 >());
int index = 0;
for (int i = 0; i < kImageRows; ++i) {
for (int j = 0; j < kImageCols; ++j) {
uint16_t s = img.get_pixel(j, i);
*(data + index) = (s << 13 | s >> 3);
++index;
}
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
void SaveFusedDepthFile() {
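// Renders the fused depth from the current raycast (the caller has reset the pose to initPose),
// re-applies the SUN3D bit-rotation and writes a 16-bit PNG named after the original depth frame.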
string depth_full_name = depth_list[param_start_index];
string depth_serial_name = depth_full_name.substr(
depth_full_name.size() - param_file_name_length, param_file_name_length);
string fused_full_name = fused_dir + depth_serial_name;
#ifdef RESOLUTION_1280X960
png::image<png::gray_pixel_16> img(kImageCols * 2, kImageRows * 2);
kfusion.Raycast_2();
renderFusedMap(fusedDepth.getDeviceImage(), kfusion.vertex_2);
for (int i = 0; i < kImageRows * 2; ++i) {
for (int j = 0; j < kImageCols * 2; ++j) {
uint16_t s = fusedDepth[make_uint2(j,i)];
img[i][j] = (s >> 13 | s << 3);
}
}
#else
png::image<png::gray_pixel_16> img(kImageCols, kImageRows);
renderFusedMap(fusedDepth.getDeviceImage(), kfusion.vertex);
cudaDeviceSynchronize();
for (int i = 0; i < kImageRows; ++i) {
for (int j = 0; j < kImageCols; ++j) {
uint16_t s = fusedDepth[make_uint2(j,i)];
img[i][j] = (s >> 13 | s << 3);
}
}
#endif
img.write(fused_full_name.c_str());
#if 0
string pose_txt_name = data_dir + "poseTSDF.txt";
ofstream pose_file;
pose_file.open(pose_txt_name.c_str(), fstream::app);
pose_file.precision(60);
for (int i = 0; i < 3; ++i) {
pose_file << second_pose.data[i].x << "\t";
pose_file << second_pose.data[i].y << "\t";
pose_file << second_pose.data[i].z << "\t";
pose_file << second_pose.data[i].w << "\n";
}
pose_file.close();
#endif
}
////////////////////////////////////////////////////////////////////////////////
bool GetExtrinsicData(string file_name, vector<Matrix4> *poses) {
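// Reads one 3x4 row-major pose per image from the extrinsics file and completes each to a 4x4 matrix with a [0 0 0 1] bottom row.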
FILE *fp = fopen(file_name.c_str(), "r");
for (int i = 0; i < image_list.size(); ++i) {
Matrix4 m;
for (int d = 0; d < 3; ++d) {
if (fscanf(fp, "%f", &m.data[d].x));
if (fscanf(fp, "%f", &m.data[d].y));
if (fscanf(fp, "%f", &m.data[d].z));
if (fscanf(fp, "%f", &m.data[d].w));
}
m.data[3].x = m.data[3].y = m.data[3].z = 0.f;
m.data[3].w = 1.f;
poses->push_back(m);
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
bool GetImageData(string file_name, unsigned char *data) {
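// Decodes a JPEG with libjpeg into an interleaved 8-bit buffer, kImageChannels bytes per pixel.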
unsigned char *raw_image = NULL;
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
JSAMPROW row_pointer[1];
FILE *infile = fopen(file_name.c_str(), "rb");
unsigned long location = 0;
if (!infile) {
printf("Error opening jpeg file %s!\n", file_name.c_str());
return false;  // report failure (returning -1 would convert to true)
}
cinfo.err = jpeg_std_error(&jerr);
jpeg_create_decompress(&cinfo);
jpeg_stdio_src(&cinfo, infile);
jpeg_read_header(&cinfo, TRUE);
jpeg_start_decompress(&cinfo);
raw_image = (unsigned char*) malloc(
cinfo.output_width * cinfo.output_height * cinfo.num_components);
row_pointer[0] = (unsigned char *) malloc(
cinfo.output_width * cinfo.num_components);
while (cinfo.output_scanline < cinfo.image_height) {
jpeg_read_scanlines(&cinfo, row_pointer, 1);
for (uint i = 0; i < cinfo.image_width * cinfo.num_components; i++)
raw_image[location++] = row_pointer[0][i];
}
int index = 0;
for (uint i = 0; i < cinfo.image_height; ++i) {
for (uint j = 0; j < cinfo.image_width; ++j) {
for (int k = 0; k < kImageChannels; ++k) {
*(data + index) = raw_image[(i * cinfo.image_width * 3) + (j * 3) + k];
++index;
}
}
}
jpeg_finish_decompress(&cinfo);
jpeg_destroy_decompress(&cinfo);
free(row_pointer[0]);
fclose(infile);
return true;
}
////////////////////////////////////////////////////////////////////////////////
int GetTimeStamp(const string &file_name) {
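// Parses the numeric timestamp embedded at a fixed offset inside the file name.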
return atoi(file_name.substr(
file_name.size() - param_file_name_length + param_time_stamp_pose,
param_time_stamp_length).c_str());
}
////////////////////////////////////////////////////////////////////////////////
void AssignDepthList(vector<string> image_list, vector<string> *depth_list) {
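// Rebuilds depth_list so that entry i is the depth frame whose timestamp is closest to image_list[i].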
vector<string> depth_temp;
depth_temp.swap(*depth_list);
depth_list->clear();
depth_list->reserve(image_list.size());
int idx = 0;
int depth_time = GetTimeStamp(depth_temp[idx]);
int time_low = depth_time;
for (unsigned int i = 0; i < image_list.size(); ++i) {
int image_time = GetTimeStamp(image_list[i]);
while (depth_time < image_time) {
if (idx == depth_temp.size() - 1)
break;
time_low = depth_time;
depth_time = GetTimeStamp(depth_temp[++idx]);
}
if (idx == 0 && depth_time > image_time) {
depth_list->push_back(depth_temp[idx]);
continue;
}
if (abs(image_time - time_low) < abs(depth_time - image_time)) {
depth_list->push_back(depth_temp[idx-1]);
} else {
depth_list->push_back(depth_temp[idx]);
}
}
}
////////////////////////////////////////////////////////////////////////////////
void SystemCommand(const string str) {
if (system(str.c_str()))
return;
}
////////////////////////////////////////////////////////////////////////////////
void ReComputeSecondPose() {
if (param_start_index != depth_list.size() - 1) {
// kfusion.ResetWeight(0.f);
// GetDepthData(depth_list[param_start_index], (uint16_t *)depthImage.data());
// kfusion.setKinectDeviceDepth(depthImage.getDeviceImage());
// kfusion.setPose(toMatrix4(initPose));
// kfusion.Integrate();
// kfusion.Raycast();
// cudaDeviceSynchronize();
Matrix4 delta = inverse(extrinsic_poses[param_start_index]) *
extrinsic_poses[param_start_index + 1];
kfusion.pose = kfusion.pose * delta;
GetDepthData(depth_list[param_start_index + 1],
(uint16_t *)depthImage.data());
kfusion.setKinectDeviceDepth(depthImage.getDeviceImage());
cudaDeviceSynchronize();
kfusion.Track();
cudaDeviceSynchronize();
second_pose = inverse(toMatrix4(initPose)) * kfusion.pose;
}
}
////////////////////////////////////////////////////////////////////////////////
void display(void){
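// One frame per call: track and integrate forward from the start frame, then backward; when a pass ends
// or the tracking/angle/translation thresholds trip, raycast from the initial pose, save the fused depth and exit.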
static bool first_frame = true;
static bool integrate = true;
if (param_mode == KINFU_FORWARD) {
if (file_index == param_start_index + param_frame_threshold ||
file_index == image_list.size()) {
param_mode = KINFU_BACKWARD;
file_index = param_start_index - 1;
kfusion.setPose(toMatrix4(initPose));
kfusion.Raycast();
cudaDeviceSynchronize();
cout << "IDX" << endl << endl;
return;
}
#ifdef INITIAL_POSE
// T_12 = T_01^(-1) * T_02
// T_02 = T_01 * T_12;
if (file_index > 0 && file_index != param_start_index) {
Matrix4 delta = inverse(extrinsic_poses[file_index - 1]) *
extrinsic_poses[file_index];
kfusion.pose = kfusion.pose * delta;
}
#endif
} else {
if (file_index == param_start_index - param_frame_threshold ||
file_index == -1) {
// kfusion.setPose(toMatrix4(initPose));
// kfusion.Raycast();
// cudaDeviceSynchronize();
//
// ReComputeSecondPose();
kfusion.setPose(toMatrix4(initPose));
kfusion.Raycast();
cudaDeviceSynchronize();
SaveFusedDepthFile();
cout << "IDX" << endl << endl;
exit(0);
}
#ifdef INITIAL_POSE
Matrix4 delta = inverse(extrinsic_poses[file_index + 1]) *
extrinsic_poses[file_index];
kfusion.pose = kfusion.pose * delta;
#endif
}
cout << file_index << " ";
cout.flush();
GetDepthData(depth_list[file_index], (uint16_t *)depthImage.data());
kfusion.setKinectDeviceDepth(depthImage.getDeviceImage());
/*----------------------------------------------------------------------------*/
#if 0
// // Just integrate and raycast first frame
// kfusion.Integrate();
// kfusion.Raycast();
// SaveFusedDepthFile();
// exit(0);
#endif
#if 1
// ICP off - actually on for integrate switch
// extrinsic on
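// Track() below only decides whether to integrate this frame; its pose estimate is discarded
// and the extrinsic-propagated pose restored.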
Matrix4 temp = kfusion.pose;
integrate = kfusion.Track();
kfusion.pose = temp;
#else
// ICP on
integrate = kfusion.Track();
#endif
double z_angle = 0.0;  // initialized so the threshold test below is well-defined on the start frame
Vector<3, float> diff_t;
diff_t[0] = diff_t[1] = diff_t[2] = 0.f;
if (file_index != param_start_index) {
float3 cam_z;
cam_z.x = cam_z.y = 0.f;
cam_z.z = 1.f;
float3 wor_z = kfusion.pose * cam_z;
z_angle = acos(wor_z.z);
float3 temp_t = kfusion.pose.get_translation();
Vector<3, float> curr_t;
curr_t[0] = temp_t.x;
curr_t[1] = temp_t.y;
curr_t[2] = temp_t.z;
Vector<3, float> init_t = initPose.get_translation();
diff_t = curr_t - init_t;
}
if ((!integrate && file_index != param_start_index) ||
z_angle > angle_threshold * param_angle_factor ||
norm(diff_t) > translation_threshold * param_translation_factor ) {
if (param_mode == KINFU_FORWARD) {
param_mode = KINFU_BACKWARD;
file_index = param_start_index - 1;
kfusion.setPose(toMatrix4(initPose));
kfusion.Raycast();
cudaDeviceSynchronize();
cout << "THR" << endl << endl;
return;
} else {
// kfusion.setPose(toMatrix4(initPose));
// kfusion.Raycast();
// cudaDeviceSynchronize();
//
// ReComputeSecondPose();
kfusion.setPose(toMatrix4(initPose));
kfusion.Raycast();
cudaDeviceSynchronize();
SaveFusedDepthFile();
cout << "THR" << endl << endl;
#if 0
// volume saving
// string vol_fn = fused_dir + "volume.txt";
// FILE *fpv = fopen(vol_fn.c_str(), "w");
//
// uint vol_size = kfusion.integration.size.x *
// kfusion.integration.size.y *
// kfusion.integration.size.z * sizeof(short2);
//
// short2 *vol_data = (short2*) malloc(vol_size);
// cudaMemcpy(vol_data, kfusion.integration.data, vol_size,
// cudaMemcpyDeviceToHost);
//
// for (uint x = 0; x < kfusion.integration.size.x; ++x) {
// cout << x << endl;
// for (uint y = 0; y < kfusion.integration.size.y; ++y) {
// for (uint z = 0; z < kfusion.integration.size.z; ++z) {
// short2 data = vol_data[x +
// y * kfusion.integration.size.x +
// z * kfusion.integration.size.x * kfusion.integration.size.y];
// float2 dw = make_float2(data.x * 0.00003051944088f, data.y);
// fprintf(fpv, "%f %f ", dw.x, dw.y);
// }
// }
// }
//
// fclose(fpv);
#endif
exit(0);
}
}
if (param_mode == KINFU_FORWARD)
++file_index;
else
--file_index;
/*----------------------------------------------------------------------------*/
if(integrate || first_frame) {
kfusion.Integrate();
kfusion.Raycast();
first_frame = false;
}
cudaDeviceSynchronize();
if(printCUDAError())
exit(1);
// usleep(1000 * 500);
}
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char ** argv) {
cout << "=================================================================" << endl;
string server_prefix, data_prefix, server_dir, data_name;
if (argc < 5) {
cout << "Wrong arguments ..." << endl;
exit(0);
} else {
server_prefix = argv[1];
data_prefix = argv[2];
data_name = argv[3];
param_start_index = atoi(argv[4]);
}
if (argc > 5)
param_frame_threshold = atoi(argv[5]);
if (argc > 6)
param_volume_size = atoi(argv[6]);
if (argc > 7)
param_volume_dimension = atof(argv[7]);
if (argc > 8)
param_angle_factor = atof(argv[8]);
if (argc > 9)
param_translation_factor = atof(argv[9]);
if (argc > 10)
param_rsme_threshold = atof(argv[10]);
server_dir = server_prefix + data_name;
image_dir = server_dir + "image/";
depth_dir = server_dir + "depth/";
extrinsic_dir = server_dir + "extrinsics/";
data_dir = data_prefix + data_name;
fused_dir = data_dir + "depthTSDF/";
#ifdef RESOLUTION_1280X960
fused_dir = data_dir + "depth1280x960/";
#endif
SystemCommand("mkdir -p " + fused_dir);
file_index = param_start_index;
size = param_volume_dimension;
GetFileNames(image_dir, &image_list);
GetFileNames(depth_dir, &depth_list);
GetFileNames(extrinsic_dir, &extrinsic_list);
AssignDepthList(image_list, &depth_list);
#ifdef INITIAL_POSE
string extrinsic_name = extrinsic_list[extrinsic_list.size() - 1];
// string extrinsic_name = extrinsic_list[1];
GetExtrinsicData(extrinsic_name, &extrinsic_poses);
cout << extrinsic_name << endl;
#endif
float fx, fy, cx, cy, ff;
string intrinsic = server_dir + "intrinsics.txt";
FILE *fp = fopen(intrinsic.c_str(), "r");
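// The six reads below walk the top two rows of a row-major 3x3 intrinsic matrix: fx, (skip), cx, (skip), fy, cy;
// skipped entries land in ff. The empty if-bodies merely silence warn_unused_result.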
if (fscanf(fp, "%f", &fx));
if (fscanf(fp, "%f", &ff));
if (fscanf(fp, "%f", &cx));
if (fscanf(fp, "%f", &ff));
if (fscanf(fp, "%f", &fy));
if (fscanf(fp, "%f", &cy));
angle_threshold = (float) atan(cy / fy);
translation_threshold = 1.0f * cy / fy;
/*----------------------------------------------------------------------------*/
KFusionConfig config;
config.volumeSize = make_uint3(param_volume_size);
// these are physical dimensions in meters
config.volumeDimensions = make_float3(size);
config.nearPlane = 0.4f;
config.farPlane = 5.0f;
config.mu = 0.1;
config.combinedTrackAndReduce = false;
uint2 input_size = make_uint2(kImageCols, kImageRows);
config.inputSize = input_size;
config.camera = make_float4(fx, fy, cx, cy);
config.rsme_threshold = param_rsme_threshold;
config.iterations[0] = 10;
config.iterations[1] = 5;
config.iterations[2] = 4;
initPose = SE3<float>(makeVector(size/2, size/2, 0, 0, 0, 0));
kfusion.Init(config);
// input buffers
depthImage.alloc(input_size);
// render buffers
if(printCUDAError()) {
cudaDeviceReset();
return 1;
}
memset(depthImage.data(), 0, depthImage.size.x * depthImage.size.y * sizeof(uint16_t));
#ifdef RESOLUTION_1280X960
fusedDepth.alloc(input_size * 2);
#else
fusedDepth.alloc(input_size);
#endif
kfusion.setPose(toMatrix4(initPose));
while(1) {
display();
if(stop_run)
break;
}
return 0;
}
////////////////////////////////////////////////////////////////////////////////
// sh run_sh ~/data/sun3d/ ~/data/sun3d/ hotel_umd/maryland_hotel3/
// scp maryland_hotel3.tar.gz [email protected]:/home/alan/data/sun3d/hotel_umd/
|
94b91342f3d89f08a2910f1341d475da49c6c65d.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <utilities/base_fixture.hpp>
#include <utilities/high_res_timer.hpp>
#include <utilities/test_utilities.hpp>
#include <cugraph/algorithms.hpp>
#include <cugraph/experimental/graph.hpp>
#include <cugraph/experimental/graph_view.hpp>
#include <raft/cudart_utils.h>
#include <raft/handle.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <rmm/thrust_rmm_allocator.h>
#include <algorithm>
#include <tuple>
#include <vector>
#include <hip/hip_runtime_api.h>
typedef struct InducedEgo_Usecase_t {
std::string graph_file_full_path{};
std::vector<int32_t> ego_sources{};
int32_t radius;
bool test_weighted{false};
InducedEgo_Usecase_t(std::string const& graph_file_path,
std::vector<int32_t> const& ego_sources,
int32_t radius,
bool test_weighted)
: ego_sources(ego_sources), radius(radius), test_weighted(test_weighted)
{
if ((graph_file_path.length() > 0) && (graph_file_path[0] != '/')) {
graph_file_full_path = cugraph::test::get_rapids_dataset_root_dir() + "/" + graph_file_path;
} else {
graph_file_full_path = graph_file_path;
}
};
} InducedEgo_Usecase;
class Tests_InducedEgo : public ::testing::TestWithParam<InducedEgo_Usecase> {
public:
Tests_InducedEgo() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
template <typename vertex_t, typename edge_t, typename weight_t, bool store_transposed>
void run_current_test(InducedEgo_Usecase const& configuration)
{
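// Loads the Matrix Market graph, copies the ego sources to the device, times extract_ego under the profiler,
// then copies the results back and checks offset sizes, monotonicity and vertex validity.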
int n_streams = std::min(configuration.ego_sources.size(), static_cast<std::size_t>(128));
raft::handle_t handle(n_streams);
cugraph::experimental::graph_t<vertex_t, edge_t, weight_t, store_transposed, false> graph(
handle);
std::tie(graph, std::ignore) = cugraph::test::
read_graph_from_matrix_market_file<vertex_t, edge_t, weight_t, store_transposed, false>(
handle, configuration.graph_file_full_path, configuration.test_weighted, false);
auto graph_view = graph.view();
rmm::device_uvector<vertex_t> d_ego_sources(configuration.ego_sources.size(),
handle.get_stream());
raft::update_device(d_ego_sources.data(),
configuration.ego_sources.data(),
configuration.ego_sources.size(),
handle.get_stream());
HighResTimer hr_timer;
hr_timer.start("egonet");
hipProfilerStart();
auto [d_ego_edgelist_src, d_ego_edgelist_dst, d_ego_edgelist_weights, d_ego_edge_offsets] =
cugraph::experimental::extract_ego(handle,
graph_view,
d_ego_sources.data(),
static_cast<vertex_t>(configuration.ego_sources.size()),
configuration.radius);
hipProfilerStop();
hr_timer.stop();
hr_timer.display(std::cout);
std::vector<size_t> h_cugraph_ego_edge_offsets(d_ego_edge_offsets.size());
std::vector<vertex_t> h_cugraph_ego_edgelist_src(d_ego_edgelist_src.size());
std::vector<vertex_t> h_cugraph_ego_edgelist_dst(d_ego_edgelist_dst.size());
raft::update_host(h_cugraph_ego_edgelist_src.data(),
d_ego_edgelist_src.data(),
d_ego_edgelist_src.size(),
handle.get_stream());
raft::update_host(h_cugraph_ego_edgelist_dst.data(),
d_ego_edgelist_dst.data(),
d_ego_edgelist_dst.size(),
handle.get_stream());
raft::update_host(h_cugraph_ego_edge_offsets.data(),
d_ego_edge_offsets.data(),
d_ego_edge_offsets.size(),
handle.get_stream());
ASSERT_TRUE(d_ego_edge_offsets.size() == (configuration.ego_sources.size() + 1));
ASSERT_TRUE(d_ego_edgelist_src.size() == d_ego_edgelist_dst.size());
if (configuration.test_weighted)
ASSERT_TRUE(d_ego_edgelist_src.size() == (*d_ego_edgelist_weights).size());
ASSERT_TRUE(h_cugraph_ego_edge_offsets[configuration.ego_sources.size()] ==
d_ego_edgelist_src.size());
for (size_t i = 0; i < configuration.ego_sources.size(); i++)
ASSERT_TRUE(h_cugraph_ego_edge_offsets[i] <= h_cugraph_ego_edge_offsets[i + 1]);
auto n_vertices = graph_view.get_number_of_vertices();
for (size_t i = 0; i < d_ego_edgelist_src.size(); i++) {
ASSERT_TRUE(
cugraph::experimental::is_valid_vertex(n_vertices, h_cugraph_ego_edgelist_src[i]));
ASSERT_TRUE(
cugraph::experimental::is_valid_vertex(n_vertices, h_cugraph_ego_edgelist_dst[i]));
}
}
};
TEST_P(Tests_InducedEgo, CheckInt32Int32FloatUntransposed)
{
run_current_test<int32_t, int32_t, float, false>(GetParam());
}
INSTANTIATE_TEST_SUITE_P(
simple_test,
Tests_InducedEgo,
::testing::Values(
InducedEgo_Usecase("test/datasets/karate.mtx", std::vector<int32_t>{0}, 1, false),
InducedEgo_Usecase("test/datasets/karate.mtx", std::vector<int32_t>{0}, 2, false),
InducedEgo_Usecase("test/datasets/karate.mtx", std::vector<int32_t>{1}, 3, false),
InducedEgo_Usecase("test/datasets/karate.mtx", std::vector<int32_t>{10, 0, 5}, 2, false),
InducedEgo_Usecase("test/datasets/karate.mtx", std::vector<int32_t>{9, 3, 10}, 2, false),
InducedEgo_Usecase(
"test/datasets/karate.mtx", std::vector<int32_t>{5, 9, 3, 10, 12, 13}, 2, true)));
// For perf analysis
/*
INSTANTIATE_TEST_SUITE_P(
simple_test,
Tests_InducedEgo,
::testing::Values(
InducedEgo_Usecase("test/datasets/soc-LiveJournal1.mtx", std::vector<int32_t>{0}, 1, false),
InducedEgo_Usecase("test/datasets/soc-LiveJournal1.mtx", std::vector<int32_t>{0}, 2, false),
InducedEgo_Usecase("test/datasets/soc-LiveJournal1.mtx", std::vector<int32_t>{0}, 3, false),
InducedEgo_Usecase("test/datasets/soc-LiveJournal1.mtx", std::vector<int32_t>{0}, 4, false),
InducedEgo_Usecase("test/datasets/soc-LiveJournal1.mtx", std::vector<int32_t>{0}, 5, false),
InducedEgo_Usecase(
"test/datasets/soc-LiveJournal1.mtx", std::vector<int32_t>{363617}, 2, false),
InducedEgo_Usecase(
"test/datasets/soc-LiveJournal1.mtx",
std::vector<int32_t>{
363617, 722214, 2337449, 2510183, 2513389, 225853, 2035807, 3836330, 1865496, 28755},
2,
false),
InducedEgo_Usecase(
"test/datasets/soc-LiveJournal1.mtx",
std::vector<int32_t>{
363617, 722214, 2337449, 2510183, 2513389, 225853, 2035807, 3836330, 1865496, 28755,
2536834, 3070144, 3888415, 3131712, 2382526, 1040771, 2631543, 4607218, 4465829, 3341686,
2772973, 2611175, 4526129, 2624421, 1220593, 2593137, 3270705, 1503899, 1213033, 4840102,
4529036, 3421116, 4264831, 4089751, 4272322, 3486998, 2830318, 320953, 2388331, 520808,
3023094, 1600294, 3631119, 1716614, 4829213, 1175844, 960680, 847662, 3277365, 3957318,
3455123, 2454259, 670953, 4465677, 1027332, 2560721, 89061, 1163406, 3109528, 3221856,
4714426, 2382774, 37828, 4433616, 3283229, 591911, 4200188, 442522, 872207, 2437601,
741003, 266241, 914618, 3626195, 2021080, 4679624, 777476, 2527796, 1114017, 640142,
49259, 4069879, 3869098, 1105040, 4707804, 3208582, 3325885, 1450601, 4072548, 2037062,
2029646, 4575891, 1488598, 79105, 4827273, 3795434, 4647518, 4733397, 3980718, 1184627},
2,
false),
InducedEgo_Usecase(
"test/datasets/soc-LiveJournal1.mtx",
std::vector<int32_t>{
363617, 722214, 2337449, 2510183, 2513389, 225853, 2035807, 3836330, 1865496, 28755,
2536834, 3070144, 3888415, 3131712, 2382526, 1040771, 2631543, 4607218, 4465829, 3341686,
2772973, 2611175, 4526129, 2624421, 1220593, 2593137, 3270705, 1503899, 1213033, 4840102,
4529036, 3421116, 4264831, 4089751, 4272322, 3486998, 2830318, 320953, 2388331, 520808,
3023094, 1600294, 3631119, 1716614, 4829213, 1175844, 960680, 847662, 3277365, 3957318,
3455123, 2454259, 670953, 4465677, 1027332, 2560721, 89061, 1163406, 3109528, 3221856,
4714426, 2382774, 37828, 4433616, 3283229, 591911, 4200188, 442522, 872207, 2437601,
741003, 266241, 914618, 3626195, 2021080, 4679624, 777476, 2527796, 1114017, 640142,
49259, 4069879, 3869098, 1105040, 4707804, 3208582, 3325885, 1450601, 4072548, 2037062,
2029646, 4575891, 1488598, 79105, 4827273, 3795434, 4647518, 4733397, 3980718, 1184627,
984983, 3114832, 1967741, 1599818, 144593, 2698770, 2889449, 2495550, 1053813, 1193622,
686026, 3989015, 2040719, 4693428, 3190376, 2926728, 3399030, 1664419, 662429, 4526841,
2186957, 3752558, 2440046, 2930226, 3633006, 4058166, 3137060, 3499296, 2126343, 148971,
2199672, 275811, 2813976, 2274536, 1189239, 1335942, 2465624, 2596042, 829684, 193400,
2682845, 3691697, 4022437, 4051170, 4195175, 2876420, 3984220, 2174475, 326134, 2606530,
2493046, 4706121, 1498980, 4576225, 1271339, 44832, 1875673, 4664940, 134931, 736397,
4333554, 2751031, 2163610, 2879676, 3174153, 3317403, 2052464, 1881883, 4757859, 3596257,
2358088, 2578758, 447504, 590720, 1717038, 1869795, 1133885, 3027521, 840312, 2818881,
3654321, 2730947, 353585, 1134903, 2223378, 1508824, 3662521, 1363776, 2712071, 288441,
1204581, 3502242, 4645567, 2767267, 1514366, 3956099, 1422145, 1216608, 2253360, 189132,
4238225, 1345783, 451571, 1599442, 3237284, 4711405, 929446, 1857675, 150759, 1277633,
761210, 138628, 1026833, 2599544, 2464737, 989203, 3399615, 2144292, 216142, 637312,
2044964, 716256, 1660632, 1762919, 4784357, 2213415, 2764769, 291806, 609772, 3264819,
1870953, 1516385, 235647, 1045474, 2664957, 819095, 1824119, 4045271, 4448109, 1676788,
4285177, 1580502, 3546548, 2771971, 3927086, 1339779, 3156204, 1730998, 1172522, 2433024,
4533449, 479930, 2010695, 672994, 3542039, 3176455, 26352, 2137735, 866910, 4410835,
2623982, 3603159, 2555625, 2765653, 267865, 2015523, 1009052, 4713994, 1600667, 2176195,
3179631, 4570390, 2018424, 3356384, 1784287, 894861, 3622099, 1647273, 3044136, 950354,
1491760, 3416929, 3757300, 2244912, 4129215, 1600848, 3867343, 72329, 919189, 992521,
3445975, 4712557, 4680974, 188419, 2612093, 1991268, 3566207, 2281468, 3859078, 2492806,
3398628, 763441, 2679107, 2554420, 2130132, 4664374, 1182901, 3890770, 4714667, 4209303,
4013060, 3617653, 2040022, 3296519, 4190671, 1693353, 2678411, 3788834, 2781815, 191965,
1083926, 503974, 3529226, 1650522, 1900976, 542080, 3423929, 3418905, 878165, 4701703,
3022790, 4316365, 76365, 4053672, 1358185, 3830478, 4445661, 3210024, 1895915, 4541133,
2938808, 562788, 3920065, 1458776, 4052046, 2967475, 1092809, 3203538, 159626, 3399464,
214467, 3343982, 1811854, 3189045, 4272117, 4701563, 424807, 4341116, 760545, 4674683,
1538018, 386762, 194237, 2162719, 1694433, 943728, 2389036, 2196653, 3085571, 1513424,
3689413, 3278747, 4197291, 3324063, 3651090, 1737936, 2768803, 2768889, 3108096, 4311775,
3569480, 886705, 733256, 2477493, 1735412, 2960895, 1983781, 1861797, 3566460, 4537673,
1164093, 3499764, 4553071, 3518985, 847658, 918948, 2922351, 1056144, 652895, 1013195,
780505, 1702928, 3562838, 1432719, 2405207, 1054920, 641647, 2240939, 3617702, 383165,
652641, 879593, 1810739, 2096385, 4497865, 4768530, 1743968, 3582014, 1025009, 3002122,
2422190, 527647, 1251821, 2571153, 4095874, 3705333, 3637407, 1385567, 4043855, 4041930,
2433139, 1710383, 1127734, 4362316, 711588, 817839, 3214775, 910077, 1313768, 2382229,
16864, 2081770, 3095420, 3195272, 548711, 2259860, 1167323, 2435974, 425238, 2085179,
2630042, 2632881, 2867923, 3703565, 1037695, 226617, 4379130, 1541468, 3581937, 605965,
1137674, 4655221, 4769963, 1394370, 4425315, 2990132, 2364485, 1561137, 2713384, 481509,
2900382, 934766, 2986774, 1767669, 298593, 2502539, 139296, 3794229, 4002180, 4718138,
2909238, 423691, 3023810, 2784924, 2760160, 1971980, 316683, 3828090, 3253691, 4839313,
1203624, 584938, 3901482, 1747543, 1572737, 3533226, 774708, 1691195, 1037110, 1557763,
225120, 4424243, 3524086, 1717663, 4332507, 3513592, 4274932, 1232118, 873498, 1416042,
2488925, 111391, 4704545, 4492545, 445317, 1584812, 2187737, 2471948, 3731678, 219255,
2282627, 2589971, 2372185, 4609096, 3673961, 2524410, 12823, 2437155, 3015974, 4188352,
3184084, 3690756, 1222341, 1278376, 3652030, 4162647, 326548, 3930062, 3926100, 1551222,
2722165, 4526695, 3997534, 4815513, 3139056, 2547644, 3028915, 4149092, 3656554, 2691582,
2676699, 1878842, 260174, 3129900, 4379993, 182347, 2189338, 3783616, 2616666, 2596952,
243007, 4179282, 2730, 1939894, 2332032, 3335636, 182332, 3112260, 2174584, 587481,
4527368, 3154106, 3403059, 673206, 2150292, 446521, 1600204, 4819428, 2591357, 48490,
2917012, 2285923, 1072926, 2824281, 4364250, 956033, 311938, 37251, 3729300, 2726300,
644966, 1623020, 1419070, 4646747, 2417222, 2680238, 2561083, 1793801, 2349366, 339747,
611366, 4684147, 4356907, 1277161, 4510381, 3218352, 4161658, 3200733, 1172372, 3997786,
3169266, 3353418, 2248955, 2875885, 2365369, 498208, 2968066, 2681505, 2059048, 2097106,
3607540, 1121504, 2016789, 1762605, 3138431, 866081, 3705757, 3833066, 2599788, 760816,
4046672, 1544367, 2983906, 4842911, 209599, 1250954, 3333704, 561212, 4674336, 2831841,
3690724, 2929360, 4830834, 1177524, 2487687, 3525137, 875283, 651241, 2110742, 1296646,
1543739, 4349417, 2384725, 1931751, 1519208, 1520034, 3385008, 3219962, 734912, 170230,
1741419, 729913, 2860117, 2362381, 1199807, 2424230, 177824, 125948, 2722701, 4687548,
1140771, 3232742, 4522020, 4376360, 1125603, 590312, 2481884, 138951, 4086775, 615155,
3395781, 4587272, 283209, 568470, 4296185, 4344150, 2454321, 2672602, 838828, 4051647,
1709120, 3074610, 693235, 4356087, 3018806, 239410, 2431497, 691186, 766276, 4462126,
859155, 2370304, 1571808, 1938673, 1694955, 3871296, 4245059, 3987376, 301524, 2512461,
3410437, 3300380, 684922, 4581995, 3599557, 683515, 1850634, 3704678, 1937490, 2035591,
3718533, 2065879, 3160765, 1467884, 1912241, 2501509, 3668572, 3390469, 2501150, 612319,
713633, 1976262, 135946, 3641535, 632083, 13414, 4217765, 4137712, 2550250, 3281035,
4179598, 961045, 2020694, 4380006, 1345936, 289162, 1359035, 770872, 4509911, 3947317,
4719693, 248568, 2625660, 1237232, 2153208, 4814282, 1259954, 3677369, 861222, 2883506,
3339149, 3998335, 491017, 1609022, 2648112, 742132, 649609, 4206953, 3131106, 3504814,
3344486, 611721, 3215620, 2856233, 4447505, 1949222, 1868345, 712710, 6966, 4730666,
3181872, 2972889, 3038521, 3525444, 4385208, 1845613, 1124187, 2030476, 4468651, 2478792,
3473580, 3783357, 1852991, 1648485, 871319, 1670723, 4458328, 3218600, 1811100, 3443356,
2233873, 3035207, 2548692, 3337891, 3773674, 1552957, 4782811, 3144712, 3523466, 1491315,
3955852, 1838410, 3164028, 1092543, 776459, 2959379, 2541744, 4064418, 3908320, 2854145,
3960709, 1348188, 977678, 853619, 1304291, 2848702, 1657913, 1319826, 3322665, 788037,
2913686, 4471279, 1766285, 348304, 56570, 1892118, 4017244, 401006, 3524539, 4310134,
1624693, 4081113, 957511, 849400, 129975, 2616130, 378537, 1556787, 3916162, 1039980,
4407778, 2027690, 4213675, 839863, 683134, 75805, 2493150, 4215796, 81587, 751845,
1255588, 1947964, 1950470, 859401, 3077088, 3931110, 2316256, 1523761, 4527477, 4237511,
1123513, 4209796, 3584772, 4250563, 2091754, 1618766, 2139944, 4525352, 382159, 2955887,
41760, 2313998, 496912, 3791570, 3904792, 3613654, 873959, 127076, 2537797, 2458107,
4543265, 3661909, 26828, 271816, 17854, 2461269, 1776042, 1573899, 3409957, 4335712,
4534313, 3392751, 1230124, 2159031, 4444015, 3373087, 3848014, 2026600, 1382747, 3537242,
4536743, 4714155, 3788371, 3570849, 173741, 211962, 4377778, 119369, 2856973, 2945854,
1508054, 4503932, 3141566, 1842177, 3448683, 3384614, 2886508, 1573965, 990618, 3053734,
2918742, 4508753, 1032149, 60943, 4291620, 722607, 2883224, 169359, 4356585, 3725543,
3678729, 341673, 3592828, 4077251, 3382936, 3885685, 4630994, 1286698, 4449616, 1138430,
3113385, 4660578, 2539973, 4562286, 4085089, 494737, 3967610, 2130702, 1823755, 1369324,
3796951, 956299, 141730, 935144, 4381893, 4412545, 1382250, 3024476, 2364546, 3396164,
3573511, 314081, 577688, 4154135, 1567018, 4047761, 2446220, 1148833, 4842497, 3967186,
1175290, 3749667, 1209593, 3295627, 3169065, 2460328, 1838486, 1436923, 2843887, 3676426,
2079145, 2975635, 535071, 4287509, 3281107, 39606, 3115500, 3204573, 722131, 3124073},
2,
false)));
*/
CUGRAPH_TEST_PROGRAM_MAIN()
|
94b91342f3d89f08a2910f1341d475da49c6c65d.cu
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <utilities/base_fixture.hpp>
#include <utilities/high_res_timer.hpp>
#include <utilities/test_utilities.hpp>
#include <cugraph/algorithms.hpp>
#include <cugraph/experimental/graph.hpp>
#include <cugraph/experimental/graph_view.hpp>
#include <raft/cudart_utils.h>
#include <raft/handle.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <rmm/thrust_rmm_allocator.h>
#include <algorithm>
#include <tuple>
#include <vector>
#include <cuda_profiler_api.h>
typedef struct InducedEgo_Usecase_t {
std::string graph_file_full_path{};
std::vector<int32_t> ego_sources{};
int32_t radius;
bool test_weighted{false};
InducedEgo_Usecase_t(std::string const& graph_file_path,
std::vector<int32_t> const& ego_sources,
int32_t radius,
bool test_weighted)
: ego_sources(ego_sources), radius(radius), test_weighted(test_weighted)
{
if ((graph_file_path.length() > 0) && (graph_file_path[0] != '/')) {
graph_file_full_path = cugraph::test::get_rapids_dataset_root_dir() + "/" + graph_file_path;
} else {
graph_file_full_path = graph_file_path;
}
};
} InducedEgo_Usecase;
class Tests_InducedEgo : public ::testing::TestWithParam<InducedEgo_Usecase> {
public:
Tests_InducedEgo() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
template <typename vertex_t, typename edge_t, typename weight_t, bool store_transposed>
void run_current_test(InducedEgo_Usecase const& configuration)
{
int n_streams = std::min(configuration.ego_sources.size(), static_cast<std::size_t>(128));
raft::handle_t handle(n_streams);
cugraph::experimental::graph_t<vertex_t, edge_t, weight_t, store_transposed, false> graph(
handle);
std::tie(graph, std::ignore) = cugraph::test::
read_graph_from_matrix_market_file<vertex_t, edge_t, weight_t, store_transposed, false>(
handle, configuration.graph_file_full_path, configuration.test_weighted, false);
auto graph_view = graph.view();
rmm::device_uvector<vertex_t> d_ego_sources(configuration.ego_sources.size(),
handle.get_stream());
raft::update_device(d_ego_sources.data(),
configuration.ego_sources.data(),
configuration.ego_sources.size(),
handle.get_stream());
HighResTimer hr_timer;
hr_timer.start("egonet");
cudaProfilerStart();
auto [d_ego_edgelist_src, d_ego_edgelist_dst, d_ego_edgelist_weights, d_ego_edge_offsets] =
cugraph::experimental::extract_ego(handle,
graph_view,
d_ego_sources.data(),
static_cast<vertex_t>(configuration.ego_sources.size()),
configuration.radius);
cudaProfilerStop();
hr_timer.stop();
hr_timer.display(std::cout);
std::vector<size_t> h_cugraph_ego_edge_offsets(d_ego_edge_offsets.size());
std::vector<vertex_t> h_cugraph_ego_edgelist_src(d_ego_edgelist_src.size());
std::vector<vertex_t> h_cugraph_ego_edgelist_dst(d_ego_edgelist_dst.size());
raft::update_host(h_cugraph_ego_edgelist_src.data(),
d_ego_edgelist_src.data(),
d_ego_edgelist_src.size(),
handle.get_stream());
raft::update_host(h_cugraph_ego_edgelist_dst.data(),
d_ego_edgelist_dst.data(),
d_ego_edgelist_dst.size(),
handle.get_stream());
raft::update_host(h_cugraph_ego_edge_offsets.data(),
d_ego_edge_offsets.data(),
d_ego_edge_offsets.size(),
handle.get_stream());
ASSERT_TRUE(d_ego_edge_offsets.size() == (configuration.ego_sources.size() + 1));
ASSERT_TRUE(d_ego_edgelist_src.size() == d_ego_edgelist_dst.size());
if (configuration.test_weighted)
ASSERT_TRUE(d_ego_edgelist_src.size() == (*d_ego_edgelist_weights).size());
ASSERT_TRUE(h_cugraph_ego_edge_offsets[configuration.ego_sources.size()] ==
d_ego_edgelist_src.size());
for (size_t i = 0; i < configuration.ego_sources.size(); i++)
ASSERT_TRUE(h_cugraph_ego_edge_offsets[i] <= h_cugraph_ego_edge_offsets[i + 1]);
auto n_vertices = graph_view.get_number_of_vertices();
for (size_t i = 0; i < d_ego_edgelist_src.size(); i++) {
ASSERT_TRUE(
cugraph::experimental::is_valid_vertex(n_vertices, h_cugraph_ego_edgelist_src[i]));
ASSERT_TRUE(
cugraph::experimental::is_valid_vertex(n_vertices, h_cugraph_ego_edgelist_dst[i]));
}
}
};
TEST_P(Tests_InducedEgo, CheckInt32Int32FloatUntransposed)
{
run_current_test<int32_t, int32_t, float, false>(GetParam());
}
INSTANTIATE_TEST_SUITE_P(
simple_test,
Tests_InducedEgo,
::testing::Values(
InducedEgo_Usecase("test/datasets/karate.mtx", std::vector<int32_t>{0}, 1, false),
InducedEgo_Usecase("test/datasets/karate.mtx", std::vector<int32_t>{0}, 2, false),
InducedEgo_Usecase("test/datasets/karate.mtx", std::vector<int32_t>{1}, 3, false),
InducedEgo_Usecase("test/datasets/karate.mtx", std::vector<int32_t>{10, 0, 5}, 2, false),
InducedEgo_Usecase("test/datasets/karate.mtx", std::vector<int32_t>{9, 3, 10}, 2, false),
InducedEgo_Usecase(
"test/datasets/karate.mtx", std::vector<int32_t>{5, 9, 3, 10, 12, 13}, 2, true)));
// For perf analysis
/*
INSTANTIATE_TEST_SUITE_P(
simple_test,
Tests_InducedEgo,
::testing::Values(
InducedEgo_Usecase("test/datasets/soc-LiveJournal1.mtx", std::vector<int32_t>{0}, 1, false),
InducedEgo_Usecase("test/datasets/soc-LiveJournal1.mtx", std::vector<int32_t>{0}, 2, false),
InducedEgo_Usecase("test/datasets/soc-LiveJournal1.mtx", std::vector<int32_t>{0}, 3, false),
InducedEgo_Usecase("test/datasets/soc-LiveJournal1.mtx", std::vector<int32_t>{0}, 4, false),
InducedEgo_Usecase("test/datasets/soc-LiveJournal1.mtx", std::vector<int32_t>{0}, 5, false),
InducedEgo_Usecase(
"test/datasets/soc-LiveJournal1.mtx", std::vector<int32_t>{363617}, 2, false),
InducedEgo_Usecase(
"test/datasets/soc-LiveJournal1.mtx",
std::vector<int32_t>{
363617, 722214, 2337449, 2510183, 2513389, 225853, 2035807, 3836330, 1865496, 28755},
2,
false),
InducedEgo_Usecase(
"test/datasets/soc-LiveJournal1.mtx",
std::vector<int32_t>{
363617, 722214, 2337449, 2510183, 2513389, 225853, 2035807, 3836330, 1865496, 28755,
2536834, 3070144, 3888415, 3131712, 2382526, 1040771, 2631543, 4607218, 4465829, 3341686,
2772973, 2611175, 4526129, 2624421, 1220593, 2593137, 3270705, 1503899, 1213033, 4840102,
4529036, 3421116, 4264831, 4089751, 4272322, 3486998, 2830318, 320953, 2388331, 520808,
3023094, 1600294, 3631119, 1716614, 4829213, 1175844, 960680, 847662, 3277365, 3957318,
3455123, 2454259, 670953, 4465677, 1027332, 2560721, 89061, 1163406, 3109528, 3221856,
4714426, 2382774, 37828, 4433616, 3283229, 591911, 4200188, 442522, 872207, 2437601,
741003, 266241, 914618, 3626195, 2021080, 4679624, 777476, 2527796, 1114017, 640142,
49259, 4069879, 3869098, 1105040, 4707804, 3208582, 3325885, 1450601, 4072548, 2037062,
2029646, 4575891, 1488598, 79105, 4827273, 3795434, 4647518, 4733397, 3980718, 1184627},
2,
false),
InducedEgo_Usecase(
"test/datasets/soc-LiveJournal1.mtx",
std::vector<int32_t>{
363617, 722214, 2337449, 2510183, 2513389, 225853, 2035807, 3836330, 1865496, 28755,
2536834, 3070144, 3888415, 3131712, 2382526, 1040771, 2631543, 4607218, 4465829, 3341686,
2772973, 2611175, 4526129, 2624421, 1220593, 2593137, 3270705, 1503899, 1213033, 4840102,
4529036, 3421116, 4264831, 4089751, 4272322, 3486998, 2830318, 320953, 2388331, 520808,
3023094, 1600294, 3631119, 1716614, 4829213, 1175844, 960680, 847662, 3277365, 3957318,
3455123, 2454259, 670953, 4465677, 1027332, 2560721, 89061, 1163406, 3109528, 3221856,
4714426, 2382774, 37828, 4433616, 3283229, 591911, 4200188, 442522, 872207, 2437601,
741003, 266241, 914618, 3626195, 2021080, 4679624, 777476, 2527796, 1114017, 640142,
49259, 4069879, 3869098, 1105040, 4707804, 3208582, 3325885, 1450601, 4072548, 2037062,
2029646, 4575891, 1488598, 79105, 4827273, 3795434, 4647518, 4733397, 3980718, 1184627,
984983, 3114832, 1967741, 1599818, 144593, 2698770, 2889449, 2495550, 1053813, 1193622,
686026, 3989015, 2040719, 4693428, 3190376, 2926728, 3399030, 1664419, 662429, 4526841,
2186957, 3752558, 2440046, 2930226, 3633006, 4058166, 3137060, 3499296, 2126343, 148971,
2199672, 275811, 2813976, 2274536, 1189239, 1335942, 2465624, 2596042, 829684, 193400,
2682845, 3691697, 4022437, 4051170, 4195175, 2876420, 3984220, 2174475, 326134, 2606530,
2493046, 4706121, 1498980, 4576225, 1271339, 44832, 1875673, 4664940, 134931, 736397,
4333554, 2751031, 2163610, 2879676, 3174153, 3317403, 2052464, 1881883, 4757859, 3596257,
2358088, 2578758, 447504, 590720, 1717038, 1869795, 1133885, 3027521, 840312, 2818881,
3654321, 2730947, 353585, 1134903, 2223378, 1508824, 3662521, 1363776, 2712071, 288441,
1204581, 3502242, 4645567, 2767267, 1514366, 3956099, 1422145, 1216608, 2253360, 189132,
4238225, 1345783, 451571, 1599442, 3237284, 4711405, 929446, 1857675, 150759, 1277633,
761210, 138628, 1026833, 2599544, 2464737, 989203, 3399615, 2144292, 216142, 637312,
2044964, 716256, 1660632, 1762919, 4784357, 2213415, 2764769, 291806, 609772, 3264819,
1870953, 1516385, 235647, 1045474, 2664957, 819095, 1824119, 4045271, 4448109, 1676788,
4285177, 1580502, 3546548, 2771971, 3927086, 1339779, 3156204, 1730998, 1172522, 2433024,
4533449, 479930, 2010695, 672994, 3542039, 3176455, 26352, 2137735, 866910, 4410835,
2623982, 3603159, 2555625, 2765653, 267865, 2015523, 1009052, 4713994, 1600667, 2176195,
3179631, 4570390, 2018424, 3356384, 1784287, 894861, 3622099, 1647273, 3044136, 950354,
1491760, 3416929, 3757300, 2244912, 4129215, 1600848, 3867343, 72329, 919189, 992521,
3445975, 4712557, 4680974, 188419, 2612093, 1991268, 3566207, 2281468, 3859078, 2492806,
3398628, 763441, 2679107, 2554420, 2130132, 4664374, 1182901, 3890770, 4714667, 4209303,
4013060, 3617653, 2040022, 3296519, 4190671, 1693353, 2678411, 3788834, 2781815, 191965,
1083926, 503974, 3529226, 1650522, 1900976, 542080, 3423929, 3418905, 878165, 4701703,
3022790, 4316365, 76365, 4053672, 1358185, 3830478, 4445661, 3210024, 1895915, 4541133,
2938808, 562788, 3920065, 1458776, 4052046, 2967475, 1092809, 3203538, 159626, 3399464,
214467, 3343982, 1811854, 3189045, 4272117, 4701563, 424807, 4341116, 760545, 4674683,
1538018, 386762, 194237, 2162719, 1694433, 943728, 2389036, 2196653, 3085571, 1513424,
3689413, 3278747, 4197291, 3324063, 3651090, 1737936, 2768803, 2768889, 3108096, 4311775,
3569480, 886705, 733256, 2477493, 1735412, 2960895, 1983781, 1861797, 3566460, 4537673,
1164093, 3499764, 4553071, 3518985, 847658, 918948, 2922351, 1056144, 652895, 1013195,
780505, 1702928, 3562838, 1432719, 2405207, 1054920, 641647, 2240939, 3617702, 383165,
652641, 879593, 1810739, 2096385, 4497865, 4768530, 1743968, 3582014, 1025009, 3002122,
2422190, 527647, 1251821, 2571153, 4095874, 3705333, 3637407, 1385567, 4043855, 4041930,
2433139, 1710383, 1127734, 4362316, 711588, 817839, 3214775, 910077, 1313768, 2382229,
16864, 2081770, 3095420, 3195272, 548711, 2259860, 1167323, 2435974, 425238, 2085179,
2630042, 2632881, 2867923, 3703565, 1037695, 226617, 4379130, 1541468, 3581937, 605965,
1137674, 4655221, 4769963, 1394370, 4425315, 2990132, 2364485, 1561137, 2713384, 481509,
2900382, 934766, 2986774, 1767669, 298593, 2502539, 139296, 3794229, 4002180, 4718138,
2909238, 423691, 3023810, 2784924, 2760160, 1971980, 316683, 3828090, 3253691, 4839313,
1203624, 584938, 3901482, 1747543, 1572737, 3533226, 774708, 1691195, 1037110, 1557763,
225120, 4424243, 3524086, 1717663, 4332507, 3513592, 4274932, 1232118, 873498, 1416042,
2488925, 111391, 4704545, 4492545, 445317, 1584812, 2187737, 2471948, 3731678, 219255,
2282627, 2589971, 2372185, 4609096, 3673961, 2524410, 12823, 2437155, 3015974, 4188352,
3184084, 3690756, 1222341, 1278376, 3652030, 4162647, 326548, 3930062, 3926100, 1551222,
2722165, 4526695, 3997534, 4815513, 3139056, 2547644, 3028915, 4149092, 3656554, 2691582,
2676699, 1878842, 260174, 3129900, 4379993, 182347, 2189338, 3783616, 2616666, 2596952,
243007, 4179282, 2730, 1939894, 2332032, 3335636, 182332, 3112260, 2174584, 587481,
4527368, 3154106, 3403059, 673206, 2150292, 446521, 1600204, 4819428, 2591357, 48490,
2917012, 2285923, 1072926, 2824281, 4364250, 956033, 311938, 37251, 3729300, 2726300,
644966, 1623020, 1419070, 4646747, 2417222, 2680238, 2561083, 1793801, 2349366, 339747,
611366, 4684147, 4356907, 1277161, 4510381, 3218352, 4161658, 3200733, 1172372, 3997786,
3169266, 3353418, 2248955, 2875885, 2365369, 498208, 2968066, 2681505, 2059048, 2097106,
3607540, 1121504, 2016789, 1762605, 3138431, 866081, 3705757, 3833066, 2599788, 760816,
4046672, 1544367, 2983906, 4842911, 209599, 1250954, 3333704, 561212, 4674336, 2831841,
3690724, 2929360, 4830834, 1177524, 2487687, 3525137, 875283, 651241, 2110742, 1296646,
1543739, 4349417, 2384725, 1931751, 1519208, 1520034, 3385008, 3219962, 734912, 170230,
1741419, 729913, 2860117, 2362381, 1199807, 2424230, 177824, 125948, 2722701, 4687548,
1140771, 3232742, 4522020, 4376360, 1125603, 590312, 2481884, 138951, 4086775, 615155,
3395781, 4587272, 283209, 568470, 4296185, 4344150, 2454321, 2672602, 838828, 4051647,
1709120, 3074610, 693235, 4356087, 3018806, 239410, 2431497, 691186, 766276, 4462126,
859155, 2370304, 1571808, 1938673, 1694955, 3871296, 4245059, 3987376, 301524, 2512461,
3410437, 3300380, 684922, 4581995, 3599557, 683515, 1850634, 3704678, 1937490, 2035591,
3718533, 2065879, 3160765, 1467884, 1912241, 2501509, 3668572, 3390469, 2501150, 612319,
713633, 1976262, 135946, 3641535, 632083, 13414, 4217765, 4137712, 2550250, 3281035,
4179598, 961045, 2020694, 4380006, 1345936, 289162, 1359035, 770872, 4509911, 3947317,
4719693, 248568, 2625660, 1237232, 2153208, 4814282, 1259954, 3677369, 861222, 2883506,
3339149, 3998335, 491017, 1609022, 2648112, 742132, 649609, 4206953, 3131106, 3504814,
3344486, 611721, 3215620, 2856233, 4447505, 1949222, 1868345, 712710, 6966, 4730666,
3181872, 2972889, 3038521, 3525444, 4385208, 1845613, 1124187, 2030476, 4468651, 2478792,
3473580, 3783357, 1852991, 1648485, 871319, 1670723, 4458328, 3218600, 1811100, 3443356,
2233873, 3035207, 2548692, 3337891, 3773674, 1552957, 4782811, 3144712, 3523466, 1491315,
3955852, 1838410, 3164028, 1092543, 776459, 2959379, 2541744, 4064418, 3908320, 2854145,
3960709, 1348188, 977678, 853619, 1304291, 2848702, 1657913, 1319826, 3322665, 788037,
2913686, 4471279, 1766285, 348304, 56570, 1892118, 4017244, 401006, 3524539, 4310134,
1624693, 4081113, 957511, 849400, 129975, 2616130, 378537, 1556787, 3916162, 1039980,
4407778, 2027690, 4213675, 839863, 683134, 75805, 2493150, 4215796, 81587, 751845,
1255588, 1947964, 1950470, 859401, 3077088, 3931110, 2316256, 1523761, 4527477, 4237511,
1123513, 4209796, 3584772, 4250563, 2091754, 1618766, 2139944, 4525352, 382159, 2955887,
41760, 2313998, 496912, 3791570, 3904792, 3613654, 873959, 127076, 2537797, 2458107,
4543265, 3661909, 26828, 271816, 17854, 2461269, 1776042, 1573899, 3409957, 4335712,
4534313, 3392751, 1230124, 2159031, 4444015, 3373087, 3848014, 2026600, 1382747, 3537242,
4536743, 4714155, 3788371, 3570849, 173741, 211962, 4377778, 119369, 2856973, 2945854,
1508054, 4503932, 3141566, 1842177, 3448683, 3384614, 2886508, 1573965, 990618, 3053734,
2918742, 4508753, 1032149, 60943, 4291620, 722607, 2883224, 169359, 4356585, 3725543,
3678729, 341673, 3592828, 4077251, 3382936, 3885685, 4630994, 1286698, 4449616, 1138430,
3113385, 4660578, 2539973, 4562286, 4085089, 494737, 3967610, 2130702, 1823755, 1369324,
3796951, 956299, 141730, 935144, 4381893, 4412545, 1382250, 3024476, 2364546, 3396164,
3573511, 314081, 577688, 4154135, 1567018, 4047761, 2446220, 1148833, 4842497, 3967186,
1175290, 3749667, 1209593, 3295627, 3169065, 2460328, 1838486, 1436923, 2843887, 3676426,
2079145, 2975635, 535071, 4287509, 3281107, 39606, 3115500, 3204573, 722131, 3124073},
2,
false)));
*/
CUGRAPH_TEST_PROGRAM_MAIN()
|
814cf3ca33d82e5f8dc7590add87e2f03d31d28f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <device_launch_parameters.h>
#include "caffe/filler.hpp"
#include "caffe/layers/embed_layer.hpp"
#include "caffe/util/gpu_util.cuh"
namespace caffe {
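// EmbedForward gathers row bottom_data[n] of the weight matrix for every output element (n, d);
// EmbedBackward scatter-adds the top gradient into the same rows using atomics.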
template <typename Dtype>
__global__ void EmbedForward(const int nthreads, const Dtype* bottom_data,
const Dtype* weight, const int N, Dtype* top_data) {
CUDA_KERNEL_LOOP(top_index, nthreads) {
const int n = top_index / N;
const int d = top_index % N;
const int index = static_cast<int>(bottom_data[n]);
const int weight_index = abs(index * N + d);
top_data[top_index] = weight[weight_index];
}
}
template <typename Dtype>
__global__ void EmbedBackward(const int nthreads, const Dtype* bottom_data,
const Dtype* top_diff, const int N, Dtype* weight_diff) {
CUDA_KERNEL_LOOP(top_index, nthreads) {
const int n = top_index / N;
const int d = top_index % N;
const int index = static_cast<int>(static_cast<double>(bottom_data[n]));
const int weight_index = index * N + d;
caffe_gpu_atomic_add(top_diff[top_index], weight_diff + weight_index);
}
}
template <typename Ftype, typename Btype>
void EmbedLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
Ftype* top_data = top[0]->mutable_gpu_data<Ftype>();
const Ftype* weight = this->blobs_[0]->template gpu_data<Ftype>();
const int count = top[0]->count();
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(EmbedForward, dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
count, bottom_data, weight, N_, top_data);
CUDA_CHECK(hipStreamSynchronize(stream));
if (bias_term_) {
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, 1, Ftype(1),
bias_multiplier_.template gpu_data<Ftype>(),
this->blobs_[1]->template gpu_data<Ftype>(), Ftype(1), top_data);
}
}
template <typename Ftype, typename Btype>
void EmbedLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
const vector<bool>& propagate_down, const vector<Blob*>& bottom) {
CHECK(!propagate_down[0]) << "Can't backpropagate to EmbedLayer input.";
if (this->param_propagate_down_[0]) {
const int top_count = top[0]->count();
const Btype* top_diff = top[0]->gpu_diff<Btype>();
const Btype* bottom_data = bottom[0]->gpu_data<Btype>();
Btype* weight_diff = this->blobs_[0]->template mutable_gpu_diff<Btype>();
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(EmbedBackward, dim3(CAFFE_GET_BLOCKS(top_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
top_count, bottom_data, top_diff, N_, weight_diff);
CUDA_CHECK(hipStreamSynchronize(stream));
}
if (bias_term_ && this->param_propagate_down_[1]) {
const Btype* top_diff = top[0]->gpu_diff<Btype>();
Btype* bias_diff = this->blobs_[1]->template mutable_gpu_diff<Btype>();
caffe_gpu_gemv(CblasTrans, M_, N_, Btype(1), top_diff,
bias_multiplier_.template gpu_data<Btype>(), Btype(1), bias_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS_FB(EmbedLayer);
} // namespace caffe
|
814cf3ca33d82e5f8dc7590add87e2f03d31d28f.cu
|
#include <vector>
#include <device_launch_parameters.h>
#include "caffe/filler.hpp"
#include "caffe/layers/embed_layer.hpp"
#include "caffe/util/gpu_util.cuh"
namespace caffe {
template <typename Dtype>
__global__ void EmbedForward(const int nthreads, const Dtype* bottom_data,
const Dtype* weight, const int N, Dtype* top_data) {
CUDA_KERNEL_LOOP(top_index, nthreads) {
const int n = top_index / N;
const int d = top_index % N;
const int index = static_cast<int>(bottom_data[n]);
const int weight_index = abs(index * N + d);
top_data[top_index] = weight[weight_index];
}
}
template <typename Dtype>
__global__ void EmbedBackward(const int nthreads, const Dtype* bottom_data,
const Dtype* top_diff, const int N, Dtype* weight_diff) {
CUDA_KERNEL_LOOP(top_index, nthreads) {
const int n = top_index / N;
const int d = top_index % N;
const int index = static_cast<int>(static_cast<double>(bottom_data[n]));
const int weight_index = index * N + d;
caffe_gpu_atomic_add(top_diff[top_index], weight_diff + weight_index);
}
}
template <typename Ftype, typename Btype>
void EmbedLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
Ftype* top_data = top[0]->mutable_gpu_data<Ftype>();
const Ftype* weight = this->blobs_[0]->template gpu_data<Ftype>();
const int count = top[0]->count();
cudaStream_t stream = Caffe::thread_stream();
EmbedForward // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(
count, bottom_data, weight, N_, top_data);
CUDA_CHECK(cudaStreamSynchronize(stream));
if (bias_term_) {
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, 1, Ftype(1),
bias_multiplier_.template gpu_data<Ftype>(),
this->blobs_[1]->template gpu_data<Ftype>(), Ftype(1), top_data);
}
}
template <typename Ftype, typename Btype>
void EmbedLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
const vector<bool>& propagate_down, const vector<Blob*>& bottom) {
CHECK(!propagate_down[0]) << "Can't backpropagate to EmbedLayer input.";
if (this->param_propagate_down_[0]) {
const int top_count = top[0]->count();
const Btype* top_diff = top[0]->gpu_diff<Btype>();
const Btype* bottom_data = bottom[0]->gpu_data<Btype>();
Btype* weight_diff = this->blobs_[0]->template mutable_gpu_diff<Btype>();
cudaStream_t stream = Caffe::thread_stream();
EmbedBackward // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(top_count), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(
top_count, bottom_data, top_diff, N_, weight_diff);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
if (bias_term_ && this->param_propagate_down_[1]) {
const Btype* top_diff = top[0]->gpu_diff<Btype>();
Btype* bias_diff = this->blobs_[1]->template mutable_gpu_diff<Btype>();
caffe_gpu_gemv(CblasTrans, M_, N_, Btype(1), top_diff,
bias_multiplier_.template gpu_data<Btype>(), Btype(1), bias_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS_FB(EmbedLayer);
} // namespace caffe
|
d2b4f804e4fb5c321e932042b9596e05e00c9f52.hip
|
// !!! This is a file automatically generated by hipify!!!
// unit.cu
// Sanity check basic cuda commands.
// Let Catch provide main():
#define CATCH_CONFIG_MAIN
#include <iostream>
#include <vector>
#include "catch.hpp"
#include "graphes/graph.h"
#include "layers/graph_conv.h"
#include "linAlg/matrix.h"
TEST_CASE("hipMemcpy") {
Matrix<float> A(2, 2);
float in[] = {2, 1, 1, 1};
float out[] = {0, 0, 0, 0};
A.setValues(in);
hipMemcpy(out, A.getData(), 4 * sizeof(float), hipMemcpyDeviceToHost);
REQUIRE(((out[0] == 2)
&& (out[1] == 1)
&& (out[2] == 1)
&& (out[3] == 1)));
}
TEST_CASE("matMul") {
// C = A @ B
Matrix<float> A(2, 2);
Matrix<float> B(2, 2);
Matrix<float> C(2, 2);
float in[] = {2, 1, 1, 1};
float out[] = {0, 0, 0, 0};
A.setValues(in);
B.setValues(in);
hipblasHandle_t handle;
hipblasCreate(&handle);
matMul(handle, A, B, C);
hipMemcpy(out, C.getData(), 4 * sizeof(float), hipMemcpyDeviceToHost);
REQUIRE(((out[0] == 5)
&& (out[1] == 3)
&& (out[2] == 3)
&& (out[3] == 2)));
}
TEST_CASE("matMul_Add") {
// D = A @ B + C
Matrix<float> A(2, 2);
Matrix<float> B(2, 2);
Matrix<float> C(2, 2);
Matrix<float> D(2, 2);
float data0[] = {2, 1, 1, 1};
float data1[] = {1, 2, 3, 4};
float out[] = {0, 0, 0, 0};
A.setValues(data0);
B.setValues(data0);
C.setValues(data1);
hipblasHandle_t handle;
hipblasCreate(&handle);
matMul_Add(handle, A, B, C, D);
hipMemcpy(out, D.getData(), 4 * sizeof(float), hipMemcpyDeviceToHost);
REQUIRE(((out[0] == 6)
&& (out[1] == 5)
&& (out[2] == 6)
&& (out[3] == 6)));
}
TEST_CASE("add") {
// B = alpha * A + B
Matrix<float> A(2, 2);
Matrix<float> B(2, 2);
float alpha = 0.5;
float data[] = {1, 2, 3, 4};
float out[] = {0, 0, 0, 0};
A.setValues(data);
B.setValues(out);
hipblasHandle_t handle;
hipblasCreate(&handle);
add(handle, A, B, alpha);
hipMemcpy(out, B.getData(), 4 * sizeof(float), hipMemcpyDeviceToHost);
REQUIRE(((out[0] == 0.5)
&& (out[1] == 1.0)
&& (out[2] == 1.5)
&& (out[3] == 2.0)));
}
TEST_CASE("matElementMul") {
// This function applies the Hadamard product
Matrix<float> A(2, 2);
Matrix<float> B(2, 2);
Matrix<float> C(2, 2);
float data[] = {1, 2, 3, 4};
float out[] = {0, 0, 0, 0};
A.setValues(data);
B.setValues(data);
matElementMul(A, B, C);
hipMemcpy(out, C.getData(), 4 * sizeof(float), hipMemcpyDeviceToHost);
REQUIRE(((out[0] == 1)
&& (out[1] == 4)
&& (out[2] == 9)
&& (out[3] == 16)));
}
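// Element-wise functor callable on host and device; exercised by the matApply test below.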
struct addTwo {
__host__ __device__
float operator() (float a) const {
return a + 2.0f;
}
};
TEST_CASE("matApply") {
// B = op(A). op is a function that applies elementwise.
Matrix<float> A(2, 2);
Matrix<float> B(2, 2);
float data[] = {1, 2, 3, 4};
float out[] = {0, 0, 0, 0};
A.setValues(data);
matApply(A, B, addTwo{});
hipMemcpy(out, B.getData(), 4 * sizeof(float), hipMemcpyDeviceToHost);
REQUIRE(((out[0] == 3)
&& (out[1] == 4)
&& (out[2] == 5)
&& (out[3] == 6)));
}
TEST_CASE("sparseMatMul") {
// Like matMul but with csr representation. The Graph class handles
// this. The values are weird because the graph normalizes the
// values it parses from the adjacency list values.
std::vector<std::vector<size_t>> adj_list = {{1}, {0, 2}, {1}};
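// For reference, adj_list above is the path graph 0 - 1 - 2, i.e. the adjacency matrix
// [[0,1,0],[1,0,1],[0,1,0]]. Its CSR form (before any normalization the Graph class
// applies) is: row pointers [0,1,3,4], column indices [1,0,2,1], values [1,1,1,1].
// The exact normalized values checked below come from the Graph implementation and
// are not derived here.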
hipsparseHandle_t sparseHandle;
hipsparseCreate(&sparseHandle);
Graph<float> g(adj_list, sparseHandle);
Matrix<float> A(3, 3);
Matrix<float> B(3, 3);
float data[] = {1, 1, 0,
0, 1, 0,
0, 0, 1};
A.setValues(data);
sparseMatMul(sparseHandle, g, A, B);
hipMemcpy(data, B.getData(), 9 * sizeof(float), hipMemcpyDeviceToHost);
REQUIRE(((data[0] == Approx(0.908).epsilon(0.01))
&& (data[1] == Approx(0.741).epsilon(0.01))));
}
|
d2b4f804e4fb5c321e932042b9596e05e00c9f52.cu
|
// unit.cu
// Sanity check basic cuda commands.
// Let Catch provide main():
#define CATCH_CONFIG_MAIN
#include <iostream>
#include <vector>
#include "catch.hpp"
#include "graphes/graph.h"
#include "layers/graph_conv.h"
#include "linAlg/matrix.h"
TEST_CASE("cudaMemcpy") {
Matrix<float> A(2, 2);
float in[] = {2, 1, 1, 1};
float out[] = {0, 0, 0, 0};
A.setValues(in);
cudaMemcpy(out, A.getData(), 4 * sizeof(float), cudaMemcpyDeviceToHost);
REQUIRE(((out[0] == 2)
&& (out[1] == 1)
&& (out[2] == 1)
&& (out[3] == 1)));
}
TEST_CASE("matMul") {
// C = A @ B
Matrix<float> A(2, 2);
Matrix<float> B(2, 2);
Matrix<float> C(2, 2);
float in[] = {2, 1, 1, 1};
float out[] = {0, 0, 0, 0};
A.setValues(in);
B.setValues(in);
cublasHandle_t handle;
cublasCreate(&handle);
matMul(handle, A, B, C);
cudaMemcpy(out, C.getData(), 4 * sizeof(float), cudaMemcpyDeviceToHost);
REQUIRE(((out[0] == 5)
&& (out[1] == 3)
&& (out[2] == 3)
&& (out[3] == 2)));
}
TEST_CASE("matMul_Add") {
// D = A @ B + C
Matrix<float> A(2, 2);
Matrix<float> B(2, 2);
Matrix<float> C(2, 2);
Matrix<float> D(2, 2);
float data0[] = {2, 1, 1, 1};
float data1[] = {1, 2, 3, 4};
float out[] = {0, 0, 0, 0};
A.setValues(data0);
B.setValues(data0);
C.setValues(data1);
cublasHandle_t handle;
cublasCreate(&handle);
matMul_Add(handle, A, B, C, D);
cudaMemcpy(out, D.getData(), 4 * sizeof(float), cudaMemcpyDeviceToHost);
REQUIRE(((out[0] == 6)
&& (out[1] == 5)
&& (out[2] == 6)
&& (out[3] == 6)));
}
TEST_CASE("add") {
// B = alpha * A + B
Matrix<float> A(2, 2);
Matrix<float> B(2, 2);
float alpha = 0.5;
float data[] = {1, 2, 3, 4};
float out[] = {0, 0, 0, 0};
A.setValues(data);
B.setValues(out);
cublasHandle_t handle;
cublasCreate(&handle);
add(handle, A, B, alpha);
cudaMemcpy(out, B.getData(), 4 * sizeof(float), cudaMemcpyDeviceToHost);
REQUIRE(((out[0] == 0.5)
&& (out[1] == 1.0)
&& (out[2] == 1.5)
&& (out[3] == 2.0)));
}
TEST_CASE("matElementMul") {
// This function applies the Hadamard product
Matrix<float> A(2, 2);
Matrix<float> B(2, 2);
Matrix<float> C(2, 2);
float data[] = {1, 2, 3, 4};
float out[] = {0, 0, 0, 0};
A.setValues(data);
B.setValues(data);
matElementMul(A, B, C);
cudaMemcpy(out, C.getData(), 4 * sizeof(float), cudaMemcpyDeviceToHost);
REQUIRE(((out[0] == 1)
&& (out[1] == 4)
&& (out[2] == 9)
&& (out[3] == 16)));
}
struct addTwo {
__host__ __device__
float operator() (float a) const {
return a + 2.0f;
}
};
TEST_CASE("matApply") {
// B = op(A). op is a function that applies elementwise.
Matrix<float> A(2, 2);
Matrix<float> B(2, 2);
float data[] = {1, 2, 3, 4};
float out[] = {0, 0, 0, 0};
A.setValues(data);
matApply(A, B, addTwo{});
cudaMemcpy(out, B.getData(), 4 * sizeof(float), cudaMemcpyDeviceToHost);
REQUIRE(((out[0] == 3)
&& (out[1] == 4)
&& (out[2] == 5)
&& (out[3] == 6)));
}
TEST_CASE("sparseMatMul") {
// Like matMul but with csr representation. The Graph class handles
// this. The values are weird because the graph normalizes the
// values it parses from the adjacency list values.
std::vector<std::vector<size_t>> adj_list = {{1}, {0, 2}, {1}};
cusparseHandle_t sparseHandle;
cusparseCreate(&sparseHandle);
Graph<float> g(adj_list, sparseHandle);
Matrix<float> A(3, 3);
Matrix<float> B(3, 3);
float data[] = {1, 1, 0,
0, 1, 0,
0, 0, 1};
A.setValues(data);
sparseMatMul(sparseHandle, g, A, B);
cudaMemcpy(data, B.getData(), 9 * sizeof(float), cudaMemcpyDeviceToHost);
REQUIRE(((data[0] == Approx(0.908).epsilon(0.01))
&& (data[1] == Approx(0.741).epsilon(0.01))));
}
|
d69f18ff657166abf1873c8402bc19e18ecdf64e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void emptyKernel()
{
// empty kernel
}
int main(int argc, char ** argv)
{
dim3 dimGrid(1);
dim3 dimBlock(1);
hipLaunchKernelGGL(emptyKernel, dim3(dimGrid), dim3(dimBlock), 0, 0);
return 0;
}
|
d69f18ff657166abf1873c8402bc19e18ecdf64e.cu
|
#include <stdio.h>
__global__ void emptyKernel()
{
// empty kernel
}
int main(int argc, char ** argv)
{
dim3 dimGrid(1);
dim3 dimBlock(1);
emptyKernel<<<dimGrid, dimBlock>>>();
return 0;
}
|
d7353b79680410f49b67e162ab479fb553397ecf.hip
|
// !!! This is a file automatically generated by hipify!!!
// tests HIP stream callbacks (hipStreamAddCallback)
#include <iostream>
#include <memory>
#include <unistd.h>
using namespace std;
#include <hip/hip_runtime.h>
__global__ void longKernel(float *data, int N, float value) {
for(int i = 0; i < N; i++) {
data[i] += value;
}
}
void myCallback(hipStream_t stream, hipError_t status, void *data) {
char *message = (char *)data;
cout << "message " << message << endl;
}
int main(int argc, char *argv[]) {
int N = 52400; // * 1024;
float *hostfloats = new float[N];
float *gpufloats;
hipMalloc((void **)&gpufloats, N * sizeof(float));
hipStream_t stream;
hipStreamCreateWithFlags(&stream, 0);
hipLaunchKernelGGL(longKernel, dim3(102400 / 32, 1, 1), dim3(32, 1, 1), 0, stream, gpufloats, N, 3.0f);
cout << "queued kernel x" << endl;
const char *message = "hello";
hipStreamAddCallback(stream, myCallback, (void *)message, 0);
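// hipStreamAddCallback runs myCallback on the host only after all work queued on
// `stream` before this call (the longKernel launch above) has completed.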
cout << "added callback" << endl;
sleep(1);
cout << "synchronizing..." << endl;
hipStreamSynchronize(stream);
cout << "... synchronized" << endl;
hipStreamDestroy(stream);
hipFree(gpufloats);
cout << "finished" << endl;
return 0;
}
|
d7353b79680410f49b67e162ab479fb553397ecf.cu
|
// tests CUDA stream callbacks (cudaStreamAddCallback)
#include <iostream>
#include <memory>
#include <unistd.h>
using namespace std;
#include <cuda.h>
__global__ void longKernel(float *data, int N, float value) {
for(int i = 0; i < N; i++) {
data[i] += value;
}
}
void myCallback(CUstream stream, cudaError_t status, void *data) {
char *message = (char *)data;
cout << "message " << message << endl;
}
int main(int argc, char *argv[]) {
int N = 52400; // * 1024;
float *hostfloats = new float[N];
float *gpufloats;
cudaMalloc((void **)&gpufloats, N * sizeof(float));
CUstream stream;
cuStreamCreate(&stream, 0);
longKernel<<<dim3(102400 / 32, 1, 1), dim3(32, 1, 1), 0, stream>>>(gpufloats, N, 3.0f);
cout << "queued kernel x" << endl;
const char *message = "hello";
cudaStreamAddCallback(stream, myCallback, (void *)message, 0);
cout << "added callback" << endl;
sleep(1);
cout << "synchronizing..." << endl;
cuStreamSynchronize(stream);
cout << "... synchronized" << endl;
cuStreamDestroy(stream);
cudaFree(gpufloats);
cout << "finished" << endl;
return 0;
}
|
8d4ecd31d15ea6069a74c7ef13dbe319da32b988.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/Exceptions.h>
#include <THH/THHTensorMathReduce.cuh>
#include <math.h>
#include "ATen/native/Distance.h"
namespace at { namespace native {
namespace {
static const int forward_threads = 256;
template <typename scalar_t>
static __forceinline__ __device__ scalar_t device_sqrt(scalar_t val);
template <>
__forceinline__ __device__ float device_sqrt(float val) {
return ::sqrtf(val);
}
template <>
__forceinline__ __device__ double device_sqrt(double val) {
return ::sqrt(val);
}
template <typename scalar_t>
struct dists {
static __forceinline__ __device__ scalar_t sign(scalar_t val) {
return (0 < val) - (val < 0);
}
// Zero norm
struct zero {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff != 0.0; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
};
// One norm
struct one {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return grad * sign(diff); }
};
// Special case backward when p is less than two
struct lt_two {
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : sign(diff) * ::pow(std::abs(diff), p - 1) * grad / ::pow(dist, p - 1); }
};
// Two norm
struct two {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff * diff; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return device_sqrt<scalar_t>(agg); }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : grad * diff / dist; }
};
// General p norm
struct p {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += ::pow(diff, p); }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return ::pow(agg, static_cast<scalar_t>(1) / p); }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : diff * ::pow(std::abs(diff), p - 2) * grad / ::pow(dist, p - 1); }
};
// Inf norm
struct inf {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { if (diff > agg) { agg = diff; } }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { if (other > update) { update = other; } }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return grad * sign(diff) * (std::abs(diff) == dist); }
};
};
template <typename scalar_t, typename F>
__global__ static void pdist_kernel_cuda_impl(scalar_t * result, const scalar_t * self, const int64_t n, const int64_t m, const scalar_t p,
const double n2, const double n2_squared_minus_1) {
const int k = blockIdx.x;
const int stride = blockDim.x;
// The -1 accounts for floating point truncation issues
int64_t i = static_cast<int64_t>((n2 - device_sqrt<double>(n2_squared_minus_1 - 2 * k)));
int64_t j = k - n * i + i * (i + 1) / 2 + i + 1;
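// Editor's note: pdist enumerates the strict upper triangle row by row, so row i is
// preceded by P(i) = i*n - i*(i+1)/2 pairs. Solving P(i) <= k for the largest integer i
// gives i = floor((n - 0.5) - sqrt((n - 0.5)^2 - 2k)), which is the expression above
// with n2 = n - 0.5; subtracting 1 under the square root only nudges the floor upward
// so rounding cannot land one row short. j then follows as i + 1 + (k - P(i)), which
// simplifies to the line above.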
const scalar_t * const start = self + i * m;
const scalar_t * const end = start + m;
const scalar_t * a = start + threadIdx.x;
const scalar_t * b = self + j * m + threadIdx.x;
scalar_t agg = 0.0;
for (; a < end; a += stride, b += stride) {
F::inc(agg, std::abs(*a - *b), p);
}
// Reduce warps
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
F::agg(agg, WARP_SHFL_DOWN(agg, offset));
}
// Reduce block
// This shared memory is significantly larger than necessary, but the
// assumption is that it's not a bottleneck, and this is simple
__shared__ scalar_t shared[forward_threads];
int lane = threadIdx.x % warpSize;
int warp_id = threadIdx.x / warpSize;
if (lane == 0) {
shared[warp_id] = agg;
}
__syncthreads();
agg = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0.0;
if (warp_id == 0) {
// Only reduce threads with nonzero data
for (int offset = blockDim.x / warpSize / 2; offset > 0; offset /= 2) {
F::agg(agg, WARP_SHFL_DOWN(agg, offset));
}
}
if (threadIdx.x == 0) {
result[k] = F::finish(agg, p);
}
}
template <typename scalar_t, typename F>
__global__ static void pdist_backward_kernel_cuda_impl(scalar_t * buffer, const scalar_t * grad, const scalar_t * self, const scalar_t * dist, int64_t gs, const int64_t n, const int64_t m, const int64_t combs, const scalar_t p,
const double n2, const double n2_squared_minus_1) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int init = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (k >= combs) {
return;
}
// The -1 accounts for floating point truncation issues
int64_t i = static_cast<int64_t>((n2 - device_sqrt<double>(n2_squared_minus_1 - 2 * k)));
int64_t j = k - n * i + i * (i + 1) / 2 + i + 1;
int64_t ib = j - i - 1;
int64_t jb = n - 2 - i;
const scalar_t grad_k = grad[k * gs];
const scalar_t dist_k = dist[k];
const scalar_t * const start = self + i * m;
const scalar_t * const end = start + m;
const scalar_t * self_i = start + init;
const scalar_t * self_j = self + j * m + init;
scalar_t * buff_i = buffer + (ib * n + i) * m + init;
scalar_t * buff_j = buffer + (jb * n + j) * m + init;
for (; self_i < end; self_i += stride, self_j += stride, buff_i += stride, buff_j += stride) {
const scalar_t res = F::backward(*self_i - *self_j, grad_k, dist_k, p);
*buff_i = res;
*buff_j = -res;
}
}
void pdist_forward_kernel_impl(Tensor& result, const Tensor& self, double p) {
const dim3 grid(result.numel());
const dim3 block(forward_threads);
int64_t n = self.size(0);
int64_t m = self.size(1);
// https://github.com/pytorch/pytorch/issues/15511 demonstrated we need to do
// some math in fp64 -- this is just minimizing the amount of fp64 math we do on the device.
const double n2 = n - .5;
const double n2_squared_minus_1 = n2 * n2 - 1;
AT_DISPATCH_FLOATING_TYPES(self.type(), "pdist_cuda", [&] {
if (p == 0.0) {
hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::zero>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else if (p == 1.0) {
hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::one>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else if (p == 2.0) {
hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::two>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else if (std::isinf(p)) {
hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else {
hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::p>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
}
});
AT_CUDA_CHECK(hipGetLastError());
}
void pdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& self, const double p, const Tensor& dist) {
if (p == 0.0 || grad.numel() == 0 || self.numel() == 0) {
result.fill_(0);
return;
}
const int64_t n = result.size(0);
int64_t m = self.size(1);
const int block_x = 64;
// NB: be careful with changing block_y; as it's currently written, grid_y is limited to be 2^16.
// From binary search, block_y of 16 gives us max pdist dim0 of 1449,
// block_y of 4 gives us max pdist dim0 of 725.
const int block_y = 16;
const int grid_x = (m + block_x * 8 - 1) / (block_x * 8);
const int grid_y = (dist.numel() + block_y - 1) / block_y;
const dim3 grid(grid_x, grid_y);
const dim3 block(block_x, block_y);
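// Each thread covers 8 strided columns, so grid_x = ceil(m / (block_x * 8));
// e.g. m = 1680 gives grid_x = ceil(1680 / 512) = 4, and grid_y tiles the
// dist.numel() pairs in rows of block_y = 16.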
// https://github.com/pytorch/pytorch/issues/15511 demonstrated we need to do
// some math in fp64 -- this is just minimizing the amount of fp64 math we do on the device.
const double n2 = n - .5;
const double n2_squared_minus_1 = n2 * n2 - 1;
Tensor buffer = at::empty({n - 1, result.size(0), result.size(1)}, result.options());
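// buffer holds one (n x m) slab of per-pair contributions per offset (n - 1 slabs in
// total): pair (i, j) writes +res into slab j - i - 1 at row i and -res into slab
// n - 2 - i at row j, so no two pairs touch the same element and the at::sum_out over
// dim 0 at the end accumulates the gradient without atomics.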
AT_DISPATCH_FLOATING_TYPES(self.type(), "pdist_cuda_backward", [&] {
if (p == 1.0) {
hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::one>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else if (p < 2.0) {
hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::lt_two>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else if (p == 2.0) {
hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::two>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else if (std::isinf(p)) {
hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else {
hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::p>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
}
});
AT_CUDA_CHECK(hipGetLastError());
at::sum_out(result, buffer, 0);
}
} // anonymous namespace
REGISTER_DISPATCH(pdist_forward_stub, &pdist_forward_kernel_impl);
REGISTER_DISPATCH(pdist_backward_stub, &pdist_backward_kernel_impl);
}} // at::native
|
8d4ecd31d15ea6069a74c7ef13dbe319da32b988.cu
|
#include <ATen/ATen.h>
#include <ATen/cuda/Exceptions.h>
#include <THC/THCTensorMathReduce.cuh>
#include <math.h>
#include "ATen/native/Distance.h"
namespace at { namespace native {
namespace {
static const int forward_threads = 256;
template <typename scalar_t>
static __forceinline__ __device__ scalar_t device_sqrt(scalar_t val);
template <>
__forceinline__ __device__ float device_sqrt(float val) {
return ::sqrtf(val);
}
template <>
__forceinline__ __device__ double device_sqrt(double val) {
return ::sqrt(val);
}
template <typename scalar_t>
struct dists {
static __forceinline__ __device__ scalar_t sign(scalar_t val) {
return (0 < val) - (val < 0);
}
// Zero norm
struct zero {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff != 0.0; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
};
// One norm
struct one {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return grad * sign(diff); }
};
// Special case backward when p is less than two
struct lt_two {
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : sign(diff) * std::pow(std::abs(diff), p - 1) * grad / std::pow(dist, p - 1); }
};
// Two norm
struct two {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff * diff; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return device_sqrt<scalar_t>(agg); }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : grad * diff / dist; }
};
// General p norm
struct p {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += std::pow(diff, p); }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return std::pow(agg, static_cast<scalar_t>(1) / p); }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : diff * std::pow(std::abs(diff), p - 2) * grad / std::pow(dist, p - 1); }
};
// Inf norm
struct inf {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { if (diff > agg) { agg = diff; } }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { if (other > update) { update = other; } }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return grad * sign(diff) * (std::abs(diff) == dist); }
};
};
template <typename scalar_t, typename F>
__global__ static void pdist_kernel_cuda_impl(scalar_t * result, const scalar_t * self, const int64_t n, const int64_t m, const scalar_t p,
const double n2, const double n2_squared_minus_1) {
const int k = blockIdx.x;
const int stride = blockDim.x;
// The -1 accounts for floating point truncation issues
int64_t i = static_cast<int64_t>((n2 - device_sqrt<double>(n2_squared_minus_1 - 2 * k)));
int64_t j = k - n * i + i * (i + 1) / 2 + i + 1;
const scalar_t * const start = self + i * m;
const scalar_t * const end = start + m;
const scalar_t * a = start + threadIdx.x;
const scalar_t * b = self + j * m + threadIdx.x;
scalar_t agg = 0.0;
for (; a < end; a += stride, b += stride) {
F::inc(agg, std::abs(*a - *b), p);
}
// Reduce warps
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
F::agg(agg, WARP_SHFL_DOWN(agg, offset));
}
// Reduce block
// This shared memory is significantly larger than necessary, but the
// assumption is that it's not a bottleneck, and this is simple
__shared__ scalar_t shared[forward_threads];
int lane = threadIdx.x % warpSize;
int warp_id = threadIdx.x / warpSize;
if (lane == 0) {
shared[warp_id] = agg;
}
__syncthreads();
agg = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0.0;
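// Only the first blockDim.x / warpSize lanes reloaded a valid per-warp partial above;
// warp 0 now folds those partials together with the same shuffle reduction, and every
// other lane contributes the neutral value 0.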
if (warp_id == 0) {
// Only reduce threads with nonzero data
for (int offset = blockDim.x / warpSize / 2; offset > 0; offset /= 2) {
F::agg(agg, WARP_SHFL_DOWN(agg, offset));
}
}
if (threadIdx.x == 0) {
result[k] = F::finish(agg, p);
}
}
template <typename scalar_t, typename F>
__global__ static void pdist_backward_kernel_cuda_impl(scalar_t * buffer, const scalar_t * grad, const scalar_t * self, const scalar_t * dist, int64_t gs, const int64_t n, const int64_t m, const int64_t combs, const scalar_t p,
const double n2, const double n2_squared_minus_1) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int init = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (k >= combs) {
return;
}
// The -1 accounts for floating point truncation issues
int64_t i = static_cast<int64_t>((n2 - device_sqrt<double>(n2_squared_minus_1 - 2 * k)));
int64_t j = k - n * i + i * (i + 1) / 2 + i + 1;
int64_t ib = j - i - 1;
int64_t jb = n - 2 - i;
const scalar_t grad_k = grad[k * gs];
const scalar_t dist_k = dist[k];
const scalar_t * const start = self + i * m;
const scalar_t * const end = start + m;
const scalar_t * self_i = start + init;
const scalar_t * self_j = self + j * m + init;
scalar_t * buff_i = buffer + (ib * n + i) * m + init;
scalar_t * buff_j = buffer + (jb * n + j) * m + init;
for (; self_i < end; self_i += stride, self_j += stride, buff_i += stride, buff_j += stride) {
const scalar_t res = F::backward(*self_i - *self_j, grad_k, dist_k, p);
*buff_i = res;
*buff_j = -res;
}
}
void pdist_forward_kernel_impl(Tensor& result, const Tensor& self, double p) {
const dim3 grid(result.numel());
const dim3 block(forward_threads);
int64_t n = self.size(0);
int64_t m = self.size(1);
// https://github.com/pytorch/pytorch/issues/15511 demonstrated we need to do
// some math in fp64 -- this is just minimizing the amount of fp64 math we do on the device.
const double n2 = n - .5;
const double n2_squared_minus_1 = n2 * n2 - 1;
AT_DISPATCH_FLOATING_TYPES(self.type(), "pdist_cuda", [&] {
if (p == 0.0) {
pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::zero><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else if (p == 1.0) {
pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::one><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else if (p == 2.0) {
pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::two><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else if (std::isinf(p)) {
pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else {
pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::p><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
}
});
AT_CUDA_CHECK(cudaGetLastError());
}
void pdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& self, const double p, const Tensor& dist) {
if (p == 0.0 || grad.numel() == 0 || self.numel() == 0) {
result.fill_(0);
return;
}
const int64_t n = result.size(0);
int64_t m = self.size(1);
const int block_x = 64;
// NB: be careful with changing block_y; as it's currently written, grid_y is limited to be 2^16.
// From binary search, block_y of 16 gives us max pdist dim0 of 1449,
// block_y of 4 gives us max pdist dim0 of 725.
const int block_y = 16;
const int grid_x = (m + block_x * 8 - 1) / (block_x * 8);
const int grid_y = (dist.numel() + block_y - 1) / block_y;
const dim3 grid(grid_x, grid_y);
const dim3 block(block_x, block_y);
// https://github.com/pytorch/pytorch/issues/15511 demonstrated we need to do
// some math in fp64 -- this is just minimizing the amount of fp64 math we do on the device.
const double n2 = n - .5;
const double n2_squared_minus_1 = n2 * n2 - 1;
Tensor buffer = at::empty({n - 1, result.size(0), result.size(1)}, result.options());
AT_DISPATCH_FLOATING_TYPES(self.type(), "pdist_cuda_backward", [&] {
if (p == 1.0) {
pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::one><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else if (p < 2.0) {
pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::lt_two><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else if (p == 2.0) {
pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::two><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else if (std::isinf(p)) {
pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else {
pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::p><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
}
});
AT_CUDA_CHECK(cudaGetLastError());
at::sum_out(result, buffer, 0);
}
} // anonymous namespace
REGISTER_DISPATCH(pdist_forward_stub, &pdist_forward_kernel_impl);
REGISTER_DISPATCH(pdist_backward_stub, &pdist_backward_kernel_impl);
}} // at::native
|
f2de6687934f76932b99a1299bc97dcde9e1dc92.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "dhidden_cal_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *a1 = NULL;
hipMalloc(&a1, XSIZE*YSIZE*sizeof(double));
double *dhidden = NULL;
hipMalloc(&dhidden, XSIZE*YSIZE*sizeof(double));
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
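// iXSIZE/iYSIZE were rounded up to multiples of BLOCKX/BLOCKY above so the grid covers
// the whole matrix; e.g. XSIZE = 1016 with BLOCKX = 24 is padded to 1032, giving 43
// blocks along x.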
hipFree(0);
hipLaunchKernelGGL(dhidden_cal_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, a1, dhidden, size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(dhidden_cal_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, a1, dhidden, size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(dhidden_cal_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, a1, dhidden, size);
}
hipDeviceSynchronize(); // drain the queued launches so the timer measures execution, not just enqueue time
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
f2de6687934f76932b99a1299bc97dcde9e1dc92.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "dhidden_cal_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *a1 = NULL;
cudaMalloc(&a1, XSIZE*YSIZE*sizeof(double));
double *dhidden = NULL;
cudaMalloc(&dhidden, XSIZE*YSIZE*sizeof(double));
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
dhidden_cal_kernel<<<gridBlock,threadBlock>>>(a1,dhidden,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
dhidden_cal_kernel<<<gridBlock,threadBlock>>>(a1,dhidden,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
dhidden_cal_kernel<<<gridBlock,threadBlock>>>(a1,dhidden,size);
}
cudaDeviceSynchronize(); // drain the queued launches so the timer measures execution, not just enqueue time
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
79d7c4f2d300bdea3ab28092110da8f8c691f94b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#define M 20
// RED = 0, BLACK = 1
enum nodeColor {
RED,
BLACK
};
enum result {
Failure,
Success,
FirstInsert
};
enum caseFlag {
NOOP,
DID_CASE1,
DID_CASE3
};
struct par_rbNode {
int key, color;
int flag;
struct par_rbNode *left, *right, *parent;
};
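// Each node's `flag` acts as a per-node spin lock: a thread claims it with
// atomicCAS(&flag, false, true) and releases it by writing false, so only the flag
// holder may detach or relink that node.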
// /*Function prototypes */
__device__ void createNIL();
__device__ struct par_rbNode * createNode(int);
__device__ void createTree();
__device__ struct par_rbNode * Traverse(struct par_rbNode *,int);
__device__ enum result PlaceNode(struct par_rbNode *);
__device__ void Insert_Rebalance(struct par_rbNode *);
__device__ bool Update_Rotation(struct par_rbNode *, enum caseFlag *);
__device__ bool Left_Rotate(struct par_rbNode *);
__device__ bool Right_Rotate(struct par_rbNode *);
__device__ void printPreorder(struct par_rbNode* );
__device__ void printInorder(struct par_rbNode* );
__device__ void printPostorder(struct par_rbNode* );
__device__ struct par_rbNode *nodes;
__device__ struct par_rbNode *root;
__device__ struct par_rbNode *NIL;
__device__ struct par_rbNode *rtParent;
__device__ struct par_rbNode *rtSibling; // You might feel this is unnecessary, but it will be used
__device__ int nodeIndex = 0;
__device__ int tmpIndex = 0;
__device__ struct par_rbNode *tmp[M];// need M tmps
__device__ int createFlag = false;
__device__ void createNIL(){
NIL = &nodes[0];
NIL->color = BLACK;
NIL->key = -1;
NIL->left = NIL->right = NIL->parent = NIL;
printf("NIL created\n");
}
__device__ struct par_rbNode * createNode(int key){
bool ok = false;
do{
ok = atomicCAS(&createFlag,false,true); //Capture the lock
}while(ok);
atomicAdd(&nodeIndex,1);
atomicAdd(&tmpIndex,1);
nodes[nodeIndex].key = key;
nodes[nodeIndex].flag = true;
nodes[nodeIndex].left = nodes[nodeIndex].right = nodes[nodeIndex].parent = NIL;
tmp[tmpIndex] = &nodes[nodeIndex];
createFlag = false;
// atomicCAS(&createFlag,true,false); //Release the lock
printf("Created %d\n",key);
return tmp[tmpIndex]; // Even if this thread pauses it will eventually return the correct pointer
}
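// createNode serializes allocation through the global createFlag spin lock, then hands
// out the next slot of the preallocated `nodes` array; the new node is returned with
// its own flag already held by the caller.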
__device__ void createTree(){
rtParent = createNode(-1);
rtSibling = createNode(-1);
// NIL = createNode(-1);
root = NIL;
rtParent->parent = NIL;
rtSibling->parent = rtParent;
rtSibling->right = NIL;
rtSibling->left = NIL;
rtParent->left = root;
//rtParent->left = root; why only left and not right?
//ANS: because we check the left-parent condition first
//(if this is unclear, try inserting a node into a tree with only one node)
rtParent->right = rtSibling;
rtParent->flag = false;
rtSibling->flag = false;
rtParent->color = BLACK;
rtSibling->color = BLACK;
// NIL->left = NIL;
// NIL->right = NIL;
NIL->parent = rtParent;
NIL->flag = false;
// NIL->color = BLACK;
printf("Tree Created \n");
printf("\n");
}
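// Layout after createTree: rtParent sits above the real root (the root hangs off
// rtParent->left) and rtSibling acts as the root's pseudo-uncle on rtParent->right,
// so the rebalancing code can always dereference a parent, grandparent and uncle even
// at the top of the tree; NIL is the shared sentinel leaf.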
__device__ struct par_rbNode * Traverse(struct par_rbNode *newNode,int key){
struct par_rbNode *x;
// struct par_rbNode *inertPoint;
// struct par_rbNode *savert;
bool success;
bool ok;
// do{
// savert = root;
// success = DCAS(&root->flag,false,true,&root,savert,savert); //Catching the flag of the root
// }while(!success);
//An alternate for DCAS - should check if it works or not
// do{
// savert = root;
// success = atomicCAS(&root->flag,false,true); //Catching the flag of the root
// }while(savert!=root || !success);
do{
// savert = root;
success = atomicCAS(&root->flag,false,true); //Catching the flag of the root
}while(success);
//atomicCAS returns the old flag value, so success == false means this thread captured
//the root's flag, while success == true means the root is under lock and we retry
//(the savert / "root has changed" checks belong to the commented-out DCAS variant above)
x = root;
if(x != NIL){
while(x != NIL){
struct par_rbNode *y = x;
if(key == x->key) {
x->flag = false; // Release the flag that was just caught
return NULL; // Traversing is done. Node is already there so Insert() fails.
}
if(key < x->key){
if(x->left != NIL){
ok = atomicCAS(&x->left->flag,false,true);
if(ok){
x->flag = false; // Release the flag of x
return NULL;
}//end if
x->flag = false;
x = x->left;
}else{
newNode->parent = x;
x = x->left;
if(x == NIL){
printf("Insert Point %d\n",y->key);
return y;
}
}//end if
}else{
if(x->right != NIL){
ok = atomicCAS(&x->right->flag,false,true);
if(ok){
x->flag = false;
return NULL;
}//end if
x->flag = false;
x = x->right;
}else{
newNode->parent = x;
x = x->right;
if(x == NIL){
printf("Insert Point %d\n",y->key);
return y;
}
}//end if
}//end if
}//end while
// return x->parent;
}else{
return NIL;
}
}
__device__ enum result PlaceNode(struct par_rbNode *newNode){
//flags on newNode and insPoint are held
bool ok = true;
// struct par_rbNode *uncle,*savep;
if(newNode->parent == NIL){ //tree is empty
newNode->color = BLACK;
newNode->parent = rtParent;
rtParent->left = newNode;
root=newNode;
NIL->flag = false; // release NIL node, that u caught during Traverse
newNode->flag = false;
newNode->left = newNode->right = NIL;
return FirstInsert;
}else{ // the tree is not empty so...
// newNode->parent = insPoint;
//set the flags of the grandparent and uncle
// struct par_rbNode *insPoint = newNode->parent;
printf("%d\n",newNode->parent->key);
printf("%d\n",newNode->parent->parent->left->key);
if(newNode->parent == newNode->parent->parent->left){ //uncle is right child
printf("Insert Key %d\n",newNode->parent->key);
// savep = newNode->parent->parent; // save parent ptr
// uncle = savep->right; // rtSibling is used here, when newNode->parent is root
ok = atomicCAS(&newNode->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&newNode->parent->parent->right->flag,false,true);
// if(ok){
// ok = atomicCAS(&newNode->parent->parent,savep,savep) && atomicCAS(&savep->right,uncle,uncle);
// }
if(ok){ //back off
newNode->parent->parent->flag = false;
newNode->parent->parent->right->flag = false;
}else{
newNode->parent->parent->flag = false;
}//end if
}
}else{// uncle is left child
// savep = newNode->parent->parent; // save parent ptr
// uncle = savep->left;
ok = atomicCAS(&newNode->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&newNode->parent->parent->left->flag,false,true);
// if(ok){
// ok = atomicCAS(&newNode->parent->parent,savep,savep) && atomicCAS(&savep->left,uncle,uncle);
// }
if(ok){ //back off
newNode->parent->parent->flag = false;
newNode->parent->parent->left->flag = false;
}else{
newNode->parent->parent->flag = false;
}//end if
}
}//end if
if(ok){
// ok == true here means the grandparent (or uncle) flag could not be captured;
// no extra flags are held at this point, so just release the flag on newNode->parent
newNode->parent->flag = false; // release flag
newNode->parent = NIL;
return Failure; //avoid deadlock
}
// When u have successfully captured all the required flags.
// i.e. parent, grandparent, uncle
if(newNode->key < newNode->parent->key){
//insert as left child
newNode->parent->left = newNode;
return Success;
}else{ //insert as right child
newNode->parent->right = newNode;
printf("THE OK = %d\n",ok);
return Success;
}
}
}
__device__ void Insert_Rebalance(struct par_rbNode *x){ //THIS FUNCTION DOESN'T BACKOFF. IT KEEPS TRYING
//we hold flags on x, p(x), p(p(x)), and uncle(x)
struct par_rbNode *oldx;
struct par_rbNode *uncle, *olduncle;
// struct par_rbNode *savep, *savegp;
struct par_rbNode *brother;
struct par_rbNode *nephew;
bool ok;
bool updateSucceeds; //Update-Rotation succeeded?
//caseF is short for caseFlag (avoiding confusion between global enum and local variable)
enum caseFlag caseF = NOOP; // initially not doing any case
//define uncle for first iteration
if(x->parent == x->parent->parent->left){
uncle = x->parent->parent->right;
}else{ // uncle is the left child not right
uncle = x->parent->parent->left;
}
while((x != root) && (x->parent->color == RED)){
//do color-update and/or rotation as required
do{
updateSucceeds = Update_Rotation(x,&caseF);
}while(!updateSucceeds);
//CASE 1: move to grandparent after color update
if(caseF == DID_CASE1){
oldx = x; //save pointer to the old x
olduncle = uncle; // save pointer to old uncle;
x = x->parent->parent; // up to grandparent
do{ //find new uncle of x and get flags
if(x->parent == x->parent->parent->left){
// savep = x->parent;
// savegp = savep->parent;
// uncle = savegp->right;
ok = atomicCAS(&x->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->right->flag,false,true);
if(ok){
x->parent->flag = false;
x->parent->parent->flag = false;
x->parent->parent->right->flag = false;
}else{
x->parent->flag = false;
x->parent->parent->flag = false;
}
}else{
x->parent->flag = false;
}
}
}else{
// savep = x->parent;
// savegp = savep->parent;
// uncle = savegp->left;
ok = atomicCAS(&x->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->left->flag,false,true);
if(ok){
x->parent->flag = false;
x->parent->parent->flag = false;
x->parent->parent->left->flag = false;
}else{
x->parent->flag = false;
x->parent->parent->flag = false;
}
}else{
x->parent->flag = false;
}
}
}
}while(ok); //THIS FUNCTION DOESN'T BACKOFF. IT KEEPS TRYING
//Release old flags for CASE 1
oldx->parent->flag = false;
olduncle->flag = false;
oldx->flag = false;
}
//in CASE 3 loop will exit: parent will be BLACK
}
switch(caseF){
case NOOP: //In the beginning of this function we had
//x,p(x),p(p(x)),uncle(x) - release them
x->parent->parent->flag = false;
x->parent->flag = false;
uncle->flag = false;
x->flag = false;
break;
case DID_CASE1: //Release the last set of flags acquired
x->parent->parent->flag = false;
x->parent->flag = false;
uncle->flag = false;
x->flag = false;
break;
case DID_CASE3: //release flags on ROTATED x, etc
if(x == x->parent->left){
brother = x->parent->right;
nephew = x->parent->right->right;
}else{
brother = x->parent->left;
nephew = x->parent->left->left;
}
x->parent->flag = false;
brother->flag = false;
nephew->flag = false;
x->flag = false;
break;
}
// printf("last %d\n",x->key);
root->color = BLACK;
}
__device__ bool Update_Rotation(struct par_rbNode *x, enum caseFlag *caseF){
//we hold flags on x, p(x), p(p(x)) and uncle(x)
struct par_rbNode *xUncle;
struct par_rbNode *oldx; //*ggp; // ggp -> greatgrandparent
bool ok;
if(x->parent == x->parent->parent->left){
//the parent is a left child
xUncle = x->parent->parent->right;
if(xUncle->color == RED){
//CASE 1 - recoloring
// U have all the flags u need. So this is simple, similar to serial code
x->parent->color = BLACK;
xUncle->color = BLACK;
x->parent->parent->color = RED;
*caseF = DID_CASE1;
return true; // This true is for "updateSucceeds"
}else{ // rotation(s) will be needed
if(x == x->parent->right){//CASE2
oldx = x; // save old x in case rotate fails
x = x->parent;
ok = Left_Rotate(x);
if(!ok){
x = oldx; //undo change to x
return false; //This false is for "updateSucceeds"
}
}
//In CASE 3, if the right-rotation fails,
//CASE 3 fails but the algorithm still works
//because the process will return false to
//Insert_Rebalance, and Insert_Rebalance will
//call Update_Rotation again to complete CASE3
do{ // get great grandparent's flag
// ggp = x->parent->parent->parent;
ok = atomicCAS(&x->parent->parent->parent->flag,false,true);
}while(ok); //KEEPS TRYING, DOESN'T BACK OFF
ok = Right_Rotate(x->parent->parent);
if(!ok){
x->parent->parent->parent->flag = false;
return false; //This false is for "updateSucceeds"
}else{
x->parent->color = BLACK;
x->parent->right->color = RED;
*caseF = DID_CASE3;
x->parent->parent->parent->flag = false; //remove the ggp flag as rotation was successful
return true;
}
}
//symmetric to above code
}else{
//the parent is a right child
xUncle = x->parent->parent->left;
if(xUncle->color == RED){
//CASE 1 - recoloring
// U have all the flags u need. So this is simple, similar to serial code
x->parent->color = BLACK;
xUncle->color = BLACK;
x->parent->parent->color = RED;
*caseF = DID_CASE1;
return true;
}else{ // rotation(s) will be needed
if(x == x->parent->left){//CASE2
oldx = x; // save old x in case rotate fails
x = x->parent;
ok = Right_Rotate(x);
if(!ok){
x = oldx; //undo change to x
return false;
}
}
//In CASE 3, if the left-rotation fails,
//CASE 3 fails but the algorithm still works
//because the process will return false to
//Insert_Rebalance, and Insert_Rebalance will
//call Update_Rotation again to complete CASE3
do{ // get great grandparent's flag
// ggp = x->parent->parent->parent;
ok = atomicCAS(&x->parent->parent->parent->flag,false,true);
}while(ok);
ok = Left_Rotate(x->parent->parent);
if(!ok){
x->parent->parent->parent->flag = false;
return false;
}else{
x->parent->color = BLACK;
x->parent->left->color = RED;
*caseF = DID_CASE3;
x->parent->parent->parent->flag = false;
return true;
}
}
}
}
//A rotation will always be successful (true), as you can reach the rotate command
//only after you have captured all the required flags
__device__ bool Left_Rotate(struct par_rbNode *z){
//z is the root of the rotation subtree. The locks
// held at this point are : z,z->parent and z->right (and sibling of z but its not useful here)
// bool ok;
struct par_rbNode *zrl,*zr;
if(z->parent == rtParent){
//rotating at the root
zrl = z->right->left;
zr = z->right;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->right = zrl;
// ok = CAS3(z->right,zrl,z->right,
// z->right,z,zrl->parent,
// zrl,zrl,z->right->left);
//update other links
root = zr;
rtParent->left = root;
root->parent = rtParent;
z->parent = root;
root->left = z;
}else{
//rotating under the root (parent, etc . exist)
if(z == z->parent->left){
//z is left child
zrl = z->right->left;
zr = z->right;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->right = zrl;
//update other links
z->parent->left = zr;
z->right->parent = z->parent;
z->parent = zr;
z->right->left = z;
}else{
// z is right child
zrl = z->right->left;
zr = z->right;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->right = zrl;
//update other links
z->parent->right = zr;
z->right->parent = z->parent;
z->parent = zr;
z->right->left = z;
}
}
return true;
}
//symmetric to Left_rotate
__device__ bool Right_Rotate(struct par_rbNode *z){
//z is the root of the rotation subtree. The locks
// held at this point are : z,z->parent and z->left (and sibling of z but its not useful here)
// bool ok;
struct par_rbNode *zrl,*zr;
if(z->parent == rtParent){
//rotating at the root
zrl = z->left->right;
zr = z->left;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->left = zrl;
// ok = CAS3(z->left,zrl,z->left,
// z->left,z,zrl->parent,
// zrl,zrl,z->left->right);
//update other links
root = zr;
rtParent->right = root;
root->parent = rtParent;
z->parent = root;
root->right = z;
}else{
//rotating under the root (parent, etc . exist)
if(z == z->parent->right){
//z is right child
zrl = z->left->right;
zr = z->left;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->left = zrl;
//update other links
z->parent->right = zr;
z->left->parent = z->parent;
z->parent = zr;
z->left->right = z;
}else{
// z is left child
zrl = z->left->right;
zr = z->left;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->left = zrl;
//update other links
z->parent->left = zr;
z->left->parent = z->parent;
z->parent = zr;
z->left->right = z;
}
}
return true;
}
__device__ void Insert(int key){
struct par_rbNode *newNode = createNode(key); //Internally the flag of the newNode is held
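// NOTE: as written, the rest of Insert is commented out below, so this call only
// allocates the node; Traverse/PlaceNode/Insert_Rebalance are never invoked and the
// kernel does not actually link the new keys into the tree.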
// struct par_rbNode *insertPoint;
// // Create and initialize the new node
// enum result res = Failure;
// //insert the new node
// do{
// //Traverse tree to find insertion point
// insertPoint = Traverse(newNode,key);
// if(insertPoint != NULL){
// //add new node to tree
// // printf("Placing Node\n");
// res = PlaceNode(newNode);
// printf("res = %d\n",res);
// // res is short for result (avoiding confusion b/w global enum and local variable)
// if(res == Success){
// printf("rebalance\n");
// //node was added succcessfully so make
// //tree red-black again by doing the
// //necessary color updates and rotations
// Insert_Rebalance(newNode);
// }
// }else{
// printf("Key Exists\n");
// res = Success;
// break;
// }
// }while(res == Failure);
// printf("PreOrder: ");
// printPreorder(root);
// printf("\n");
// printf("\n");
// printf("InOrder: ");
// printInorder(root);
// printf("\n");
// printf("\n");
// printf("PostOrder: ");
// printPostorder(root);
// printf("\n");
// printf("\n");
}
//Functions for printing the tree
__device__ void printPreorder(struct par_rbNode* node)
{
if (node == NIL)
return;
/* first print the data of node */
printf("%d-", node->key);
printf("%d", node->color);
printf(" ");
/* then recur on left child */
printPreorder(node->left);
/* now recur on right child */
printPreorder(node->right);
}
__device__ void printInorder(struct par_rbNode* node)
{
if (node == NIL)
return;
/* first recur on left child */
printInorder(node->left);
/* then print the data of node */
printf("%d-", node->key);
printf("%d", node->color);
printf(" ");
/* now recur on right child */
printInorder(node->right);
}
__device__ void printPostorder(struct par_rbNode* node)
{
if (node == NIL)
return;
/* first recur on left child */
printPostorder(node->left);
/* then recur on right child */
printPostorder(node->right);
/* now print the data of node */
printf("%d-", node->key);
printf("%d", node->color);
printf(" ");
}
__device__ int threadsFinished = 0;
__device__ int passCreate = 0;
__global__ void RBT(struct par_rbNode *d_nodes) {
int id = blockIdx.x*blockDim.x+threadIdx.x;
int threadCount = gridDim.x*blockDim.x;
if(id == 0){
printf("Starting the Tree\n");
nodes = d_nodes; // Make it a global variable
createNIL();
createTree();
atomicAdd(&passCreate,1);
}
Insert(5);
Insert(6);
Insert(4);
// while(1){
// if(passCreate){
// Insert(id);
// break;
// }
// }
// //Print the time
// //This will keep track of number of threads that are done
atomicAdd(&threadsFinished,1);
// // //Print the tree after all the threads are done
if(threadsFinished == threadCount){
if(id == 0){
// printf("PreOrder: ");
// printPreorder(root);
// printf("\n");
// printf("\n");
// printf("InOrder: ");
// printInorder(root);
// printf("\n");
// printf("\n");
// printf("PostOrder: ");
// printPostorder(root);
// printf("\n");
// printf("\n");
}
}
//return to main
}
int main() {
struct par_rbNode h_nodes[M];
struct par_rbNode *d_nodes;
float time;
// 1. Allocate device array.
hipMalloc(&d_nodes, M * sizeof(struct par_rbNode));
for(int i=0;i<M;i++){
h_nodes[i].flag = false;
h_nodes[i].color = RED;
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// 2. Copy array contents from host to device.
hipMemcpy(d_nodes, h_nodes, M * sizeof(struct par_rbNode), hipMemcpyHostToDevice);
printf("Kernel Launched\n");
hipEventRecord(start, 0);
hipLaunchKernelGGL(RBT, dim3(1), dim3(1), 0, 0, d_nodes);
hipMemcpy(h_nodes, d_nodes, M * sizeof(struct par_rbNode), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
printf("Came back\n");
hipEventElapsedTime(&time, start, stop);
printf ("Time for the kernel: %f ms\n", time);
return 0;
}
|
79d7c4f2d300bdea3ab28092110da8f8c691f94b.cu
|
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#define M 20
// RED = 0, BLACK = 1
enum nodeColor {
RED,
BLACK
};
enum result {
Failure,
Success,
FirstInsert
};
enum caseFlag {
NOOP,
DID_CASE1,
DID_CASE3
};
struct par_rbNode {
int key, color;
int flag;
struct par_rbNode *left, *right, *parent;
};
// /*Function prototypes */
__device__ void createNIL();
__device__ struct par_rbNode * createNode(int);
__device__ void createTree();
__device__ struct par_rbNode * Traverse(struct par_rbNode *,int);
__device__ enum result PlaceNode(struct par_rbNode *);
__device__ void Insert_Rebalance(struct par_rbNode *);
__device__ bool Update_Rotation(struct par_rbNode *, enum caseFlag *);
__device__ bool Left_Rotate(struct par_rbNode *);
__device__ bool Right_Rotate(struct par_rbNode *);
__device__ void printPreorder(struct par_rbNode* );
__device__ void printInorder(struct par_rbNode* );
__device__ void printPostorder(struct par_rbNode* );
__device__ struct par_rbNode *nodes;
__device__ struct par_rbNode *root;
__device__ struct par_rbNode *NIL;
__device__ struct par_rbNode *rtParent;
__device__ struct par_rbNode *rtSibling; // You might feel this is unnecessary, but it will be used
__device__ int nodeIndex = 0;
__device__ int tmpIndex = 0;
__device__ struct par_rbNode *tmp[M];// need M tmps
__device__ int createFlag = false;
__device__ void createNIL(){
NIL = &nodes[0];
NIL->color = BLACK;
NIL->key = -1;
NIL->left = NIL->right = NIL->parent = NIL;
printf("NIL created\n");
}
__device__ struct par_rbNode * createNode(int key){
bool ok = false;
do{
ok = atomicCAS(&createFlag,false,true); //Capture the lock
}while(ok);
atomicAdd(&nodeIndex,1);
atomicAdd(&tmpIndex,1);
nodes[nodeIndex].key = key;
nodes[nodeIndex].flag = true;
nodes[nodeIndex].left = nodes[nodeIndex].right = nodes[nodeIndex].parent = NIL;
tmp[tmpIndex] = &nodes[nodeIndex];
createFlag = false;
// atomicCAS(&createFlag,true,false); //Release the lock
printf("Created %d\n",key);
return tmp[tmpIndex]; // Even if this thread pauses it will eventually return the correct pointer
}
__device__ void createTree(){
rtParent = createNode(-1);
rtSibling = createNode(-1);
// NIL = createNode(-1);
root = NIL;
rtParent->parent = NIL;
rtSibling->parent = rtParent;
rtSibling->right = NIL;
rtSibling->left = NIL;
rtParent->left = root;
//rtParent->left = root; why only left and not right?
//ANS: because we check the left-parent condition first
//(if this is unclear, try inserting a node into a tree with only one node)
rtParent->right = rtSibling;
rtParent->flag = false;
rtSibling->flag = false;
rtParent->color = BLACK;
rtSibling->color = BLACK;
// NIL->left = NIL;
// NIL->right = NIL;
NIL->parent = rtParent;
NIL->flag = false;
// NIL->color = BLACK;
printf("Tree Created \n");
printf("\n");
}
__device__ struct par_rbNode * Traverse(struct par_rbNode *newNode,int key){
struct par_rbNode *x;
// struct par_rbNode *inertPoint;
// struct par_rbNode *savert;
bool success;
bool ok;
// do{
// savert = root;
// success = DCAS(&root->flag,false,true,&root,savert,savert); //Catching the flag of the root
// }while(!success);
//An alternate for DCAS - should check if it works or not
// do{
// savert = root;
// success = atomicCAS(&root->flag,false,true); //Catching the flag of the root
// }while(savert!=root || !success);
do{
// savert = root;
success = atomicCAS(&root->flag,false,true); //Catching the flag of the root
}while(success);
//atomicCAS returns the old flag value, so success == false means this thread captured
//the root's flag, while success == true means the root is under lock and we retry
//(the savert / "root has changed" checks belong to the commented-out DCAS variant above)
x = root;
if(x != NIL){
while(x != NIL){
struct par_rbNode *y = x;
if(key == x->key) {
x->flag = false; // Release the flag that was just caught
return NULL; // Traversing is done. Node is already there so Insert() fails.
}
if(key < x->key){
if(x->left != NIL){
ok = atomicCAS(&x->left->flag,false,true);
if(ok){
x->flag = false; // Release the flag of x
return NULL;
}//end if
x->flag = false;
x = x->left;
}else{
newNode->parent = x;
x = x->left;
if(x == NIL){
printf("Insert Point %d\n",y->key);
return y;
}
}//end if
}else{
if(x->right != NIL){
ok = atomicCAS(&x->right->flag,false,true);
if(ok){
x->flag = false;
return NULL;
}//end if
x->flag = false;
x = x->right;
}else{
newNode->parent = x;
x = x->right;
if(x == NIL){
printf("Insert Point %d\n",y->key);
return y;
}
}//end if
}//end if
}//end while
// return x->parent;
}else{
return NIL;
}
}
__device__ enum result PlaceNode(struct par_rbNode *newNode){
//flags on newNode and insPoint are held
bool ok = true;
// struct par_rbNode *uncle,*savep;
if(newNode->parent == NIL){ //tree is empty
newNode->color = BLACK;
newNode->parent = rtParent;
rtParent->left = newNode;
root=newNode;
NIL->flag = false; // release the NIL node flag that was caught during Traverse
newNode->flag = false;
newNode->left = newNode->right = NIL;
return FirstInsert;
}else{ // the tree is not empty so...
// newNode->parent = insPoint;
//set the flags of the grandparent and uncle
// struct par_rbNode *insPoint = newNode->parent;
printf("%d\n",newNode->parent->key);
printf("%d\n",newNode->parent->parent->left->key);
if(newNode->parent == newNode->parent->parent->left){ //uncle is right child
printf("Insert Key %d\n",newNode->parent->key);
// savep = newNode->parent->parent; // save parent ptr
// uncle = savep->right; // rtSibling is used here, when newNode->parent is root
ok = atomicCAS(&newNode->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&newNode->parent->parent->right->flag,false,true);
// if(ok){
// ok = atomicCAS(&newNode->parent->parent,savep,savep) && atomicCAS(&savep->right,uncle,uncle);
// }
if(ok){ //failed to catch the uncle flag: back off, releasing the grandparent flag we hold
newNode->parent->parent->flag = false;
}//end if
//if both the grandparent and uncle flags were caught, keep holding them for rebalancing
}
}else{// uncle is left child
// savep = newNode->parent->parent; // save parent ptr
// uncle = savep->left;
ok = atomicCAS(&newNode->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&newNode->parent->parent->left->flag,false,true);
// if(ok){
// ok = atomicCAS(&newNode->parent->parent,savep,savep) && atomicCAS(&savep->left,uncle,uncle);
// }
if(ok){ //failed to catch the uncle flag: back off, releasing the grandparent flag we hold
newNode->parent->parent->flag = false;
}//end if
//if both the grandparent and uncle flags were caught, keep holding them for rebalancing
}
}//end if
if(ok){
// ok is still true here when the grandparent (or uncle) flag could not be captured;
// no extra flags are held at that point, so just release the flag of newNode->parent
newNode->parent->flag = false; // release flag
newNode->parent = NIL;
return Failure; //avoid deadlock
}
// When all the required flags have been captured successfully,
// i.e. parent, grandparent, uncle
if(newNode->key < newNode->parent->key){
//insert as left child
newNode->parent->left = newNode;
return Success;
}else{//insert as right child
newNode->parent->right = newNode;
printf("THE OK = %d\n",ok);
return Success;
}
}
}
__device__ void Insert_Rebalance(struct par_rbNode *x){ //THIS FUNCTION DOESN'T BACKOFF. IT KEEPS TRYING
//we hold flags on x, p(x), p(p(x)), and uncle(x)
struct par_rbNode *oldx;
struct par_rbNode *uncle, *olduncle;
// struct par_rbNode *savep, *savegp;
struct par_rbNode *brother;
struct par_rbNode *nephew;
bool ok;
bool updateSucceeds; //did the Update-Rotation succeed?
//caseF is short for caseFlag (avoiding confusion between global enum and local variable)
enum caseFlag caseF = NOOP; // initially not doing any case
//define uncle for first iteration
if(x->parent == x->parent->parent->left){
uncle = x->parent->parent->right;
}else{ // uncle is the left child not right
uncle = x->parent->parent->left;
}
while((x != root) && (x->parent->color == RED)){
//do color-update and/or rotation as required
do{
updateSucceeds = Update_Rotation(x,&caseF);
}while(!updateSucceeds);
//CASE 1: move to grandparent after color update
if(caseF == DID_CASE1){
oldx = x; //save pointer to the old x
olduncle = uncle; // save pointer to old uncle;
x = x->parent->parent; // up to grandparent
do{ //find new uncle of x and get flags
if(x->parent == x->parent->parent->left){
// savep = x->parent;
// savegp = savep->parent;
// uncle = savegp->right;
ok = atomicCAS(&x->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->right->flag,false,true);
if(ok){
x->parent->flag = false;
x->parent->parent->flag = false;
x->parent->parent->right->flag = false;
}else{
x->parent->flag = false;
x->parent->parent->flag = false;
}
}else{
x->parent->flag = false;
}
}
}else{
// savep = x->parent;
// savegp = savep->parent;
// uncle = savegp->left;
ok = atomicCAS(&x->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->left->flag,false,true);
if(ok){
x->parent->flag = false;
x->parent->parent->flag = false;
x->parent->parent->left->flag = false;
}else{
x->parent->flag = false;
x->parent->parent->flag = false;
}
}else{
x->parent->flag = false;
}
}
}
}while(ok); //THIS FUNCTION DOESN'T BACKOFF. IT KEEPS TRYING
//Release old flags for CASE 1
oldx->parent->flag = false;
olduncle->flag = false;
oldx->flag = false;
}
//in CASE 3 loop will exit: parent will be BLACK
}
switch(caseF){
case NOOP: //In the beginning of this function we had
//x,p(x),p(p(x)),uncle(x) - release them
x->parent->parent->flag = false;
x->parent->flag = false;
uncle->flag = false;
x->flag = false;
break;
case DID_CASE1: //Release the last set of flags acquired
x->parent->parent->flag = false;
x->parent->flag = false;
uncle->flag = false;
x->flag = false;
break;
case DID_CASE3: //release flags on ROTATED x, etc
if(x == x->parent->left){
brother = x->parent->right;
nephew = x->parent->right->right;
}else{
brother = x->parent->left;
nephew = x->parent->left->left;
}
x->parent->flag = false;
brother->flag = false;
nephew->flag = false;
x->flag = false;
break;
}
// printf("last %d\n",x->key);
root->color = BLACK;
}
__device__ bool Update_Rotation(struct par_rbNode *x, enum caseFlag *caseF){
//we hold flags on x, p(x), p(p(x)) and uncle(x)
struct par_rbNode *xUncle;
struct par_rbNode *oldx; //*ggp; // ggp -> greatgrandparent
bool ok;
if(x->parent == x->parent->parent->left){
//the parent is a left child
xUncle = x->parent->parent->right;
if(xUncle->color == RED){
//CASE 1 - recoloring
// All the needed flags are already held, so this is simple and similar to the serial code
x->parent->color = BLACK;
xUncle->color = BLACK;
x->parent->parent->color = RED;
*caseF = DID_CASE1;
return true; // This true is for "updateSucceeds"
}else{ // rotation(s) will be needed
if(x == x->parent->right){//CASE2
oldx = x; // save old x in case rotate fails
x = x->parent;
ok = Left_Rotate(x);
if(!ok){
x = oldx; //undo change to x
return false; //This false is for "updateSucceeds"
}
}
//In CASE 3, if the right-rotation fails,
//CASE 3 fails but the algorithm still works
//because the process will return false to
//Insert_Rebalance, and Insert_Rebalance will
//call Update_Rotation again to complete CASE3
do{ // get great grandparent's flag
// ggp = x->parent->parent->parent;
ok = atomicCAS(&x->parent->parent->parent->flag,false,true);
}while(ok); //KEEPS TRYING, DOESN'T BACK OFF
ok = Right_Rotate(x->parent->parent);
if(!ok){
x->parent->parent->parent->flag = false;
return false; //This false is for "updateSucceeds"
}else{
x->parent->color = BLACK;
x->parent->right->color = RED;
*caseF = DID_CASE3;
x->parent->parent->parent->flag = false; //remove the ggp flag as rotation was successful
return true;
}
}
//symmetric to above code
}else{
//the parent is a right child
xUncle = x->parent->parent->left;
if(xUncle->color == RED){
//CASE 1 - recoloring
// All the needed flags are already held, so this is simple and similar to the serial code
x->parent->color = BLACK;
xUncle->color = BLACK;
x->parent->parent->color = RED;
*caseF = DID_CASE1;
return true;
}else{ // rotation(s) will be needed
if(x == x->parent->left){//CASE2
oldx = x; // save old x in case rotate fails
x = x->parent;
ok = Right_Rotate(x);
if(!ok){
x = oldx; //undo change to x
return false;
}
}
//In CASE 3, if the left-rotation fails,
//CASE 3 fails but the algorithm still works
//because the process will return false to
//Insert_Rebalance, and Insert_Rebalance will
//call Update_Rotation again to complete CASE3
do{ // get great grandparent's flag
// ggp = x->parent->parent->parent;
ok = atomicCAS(&x->parent->parent->parent->flag,false,true);
}while(ok);
ok = Left_Rotate(x->parent->parent);
if(!ok){
x->parent->parent->parent->flag = false;
return false;
}else{
x->parent->color = BLACK;
x->parent->left->color = RED;
*caseF = DID_CASE3;
x->parent->parent->parent->flag = false;
return true;
}
}
}
}
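/*
 * Update_Rotation covers the three classic insertion-fixup cases of the serial
 * red-black tree algorithm, here guarded by the flag protocol:
 *   CASE 1: uncle is RED        -> recolor parent/uncle/grandparent, caller climbs up
 *   CASE 2: x is an inner child -> rotate at the parent to reduce to CASE 3
 *   CASE 3: x is an outer child -> rotate at the grandparent and recolor
 * Only CASE 1 makes Insert_Rebalance continue up the tree; after CASE 3 the parent
 * is BLACK, so its while loop terminates.
 */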
//A rotation will always be successful (return true), since the rotate call can only
//be reached after all the required flags have been captured
__device__ bool Left_Rotate(struct par_rbNode *z){
//z is the root of the rotation subtree. The locks
// held at this point are : z,z->parent and z->right (and sibling of z but its not useful here)
// bool ok;
struct par_rbNode *zrl,*zr;
if(z->parent == rtParent){
//rotating at the root
zrl = z->right->left;
zr = z->right;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->right = zrl;
// ok = CAS3(z->right,zrl,z->right,
// z->right,z,zrl->parent,
// zrl,zrl,z->right->left);
//update other links
root = zr;
rtParent->left = root;
root->parent = rtParent;
z->parent = root;
root->left = z;
}else{
//rotating under the root (parent, etc . exist)
if(z == z->parent->left){
//z is left child
zrl = z->right->left;
zr = z->right;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->right = zrl;
//update other links
z->parent->left = zr;
z->right->parent = z->parent;
z->parent = zr;
z->right->left = z;
}else{
// z is right child
zrl = z->right->left;
zr = z->right;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->right = zrl;
//update other links
z->parent->right = zr;
z->right->parent = z->parent;
z->parent = zr;
z->right->left = z;
}
}
return true;
}
//symmetric to Left_rotate
__device__ bool Right_Rotate(struct par_rbNode *z){
//z is the root of the rotation subtree. The locks
// held at this point are : z,z->parent and z->left (and sibling of z but its not useful here)
// bool ok;
struct par_rbNode *zrl,*zr;
if(z->parent == rtParent){
//rotating at the root
zrl = z->left->right;
zr = z->left;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->left = zrl;
// ok = CAS3(z->left,zrl,z->left,
// z->left,z,zrl->parent,
// zrl,zrl,z->left->right);
//update other links
root = zr;
rtParent->right = root;
root->parent = rtParent;
z->parent = root;
root->right = z;
}else{
//rotating under the root (parent, etc . exist)
if(z == z->parent->right){
//z is right child
zrl = z->left->right;
zr = z->left;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->left = zrl;
//update other links
z->parent->right = zr;
z->left->parent = z->parent;
z->parent = zr;
z->left->right = z;
}else{
// z is left child
zrl = z->left->right;
zr = z->left;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->left = zrl;
//update other links
z->parent->left = zr;
z->left->parent = z->parent;
z->parent = zr;
z->left->right = z;
}
}
return true;
}
__device__ void Insert(int key){
struct par_rbNode *newNode = createNode(key); //Internally the flag of the newNode is held
struct par_rbNode *insertPoint;
// res is short for result (avoiding confusion b/w global enum and local variable)
enum result res = Failure;
//insert the new node
do{
//Traverse tree to find insertion point
insertPoint = Traverse(newNode,key);
if(insertPoint != NULL){
//add new node to tree
res = PlaceNode(newNode);
if(res == Success){
//node was added successfully so make
//tree red-black again by doing the
//necessary color updates and rotations
Insert_Rebalance(newNode);
}
}else{
printf("Key Exists\n");
res = Success;
break;
}
}while(res == Failure);
// printf("PreOrder: ");
// printPreorder(root);
// printf("\n");
// printf("\n");
// printf("InOrder: ");
// printInorder(root);
// printf("\n");
// printf("\n");
// printf("PostOrder: ");
// printPostorder(root);
// printf("\n");
// printf("\n");
}
//Functions for printing the tree
__device__ void printPreorder(struct par_rbNode* node)
{
if (node == NIL)
return;
/* first print the data of node */
printf("%d-", node->key);
printf("%d", node->color);
printf(" ");
/* then recur on left child */
printPreorder(node->left);
/* now recur on right child */
printPreorder(node->right);
}
__device__ void printInorder(struct par_rbNode* node)
{
if (node == NIL)
return;
/* first recur on left child */
printInorder(node->left);
/* then print the data of node */
printf("%d-", node->key);
printf("%d", node->color);
printf(" ");
/* now recur on right child */
printInorder(node->right);
}
__device__ void printPostorder(struct par_rbNode* node)
{
if (node == NIL)
return;
/* first recur on left child */
printPostorder(node->left);
/* then recur on right child */
printPostorder(node->right);
/* now print the data of node */
printf("%d-", node->key);
printf("%d", node->color);
printf(" ");
}
__device__ int threadsFinished = 0;
__device__ int passCreate = 0;
__global__ void RBT(struct par_rbNode *d_nodes) {
int id = blockIdx.x*blockDim.x+threadIdx.x;
int threadCount = gridDim.x*blockDim.x;
if(id == 0){
printf("Starting the Tree\n");
nodes = d_nodes; // Make it a global variable
createNIL();
createTree();
atomicAdd(&passCreate,1);
}
Insert(5);
Insert(6);
Insert(4);
// while(1){
// if(passCreate){
// Insert(id);
// break;
// }
// }
// //Print the time
// //This will keep track of number of threads that are done
atomicAdd(&threadsFinished,1);
// // //Print the tree after all the threads are done
if(threadsFinished == threadCount){
if(id == 0){
// printf("PreOrder: ");
// printPreorder(root);
// printf("\n");
// printf("\n");
// printf("InOrder: ");
// printInorder(root);
// printf("\n");
// printf("\n");
// printf("PostOrder: ");
// printPostorder(root);
// printf("\n");
// printf("\n");
}
}
//return to main
}
int main() {
struct par_rbNode h_nodes[M];
struct par_rbNode *d_nodes;
float time;
// 1. Allocate device array.
cudaMalloc(&d_nodes, M * sizeof(struct par_rbNode));
for(int i=0;i<M;i++){
h_nodes[i].flag = false;
h_nodes[i].color = RED;
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// 2. Copy array contents from host to device.
cudaMemcpy(d_nodes, h_nodes, M * sizeof(struct par_rbNode), cudaMemcpyHostToDevice);
printf("Kernel Launched\n");
cudaEventRecord(start, 0);
RBT<<<1,1>>>(d_nodes);
cudaMemcpy(h_nodes, d_nodes, M * sizeof(struct par_rbNode), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
printf("Came back\n");
cudaEventElapsedTime(&time, start, stop);
printf ("Time for the kernel: %f ms\n", time);
return 0;
}
|
39beccbad4d22aff694cdd4f1cf8cfab7a016853.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Alexander Freudenberg, [email protected]
Copyright (C) 2020-2023 Alexander Freudenberg
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hipblas.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
extern "C" {
int cublas_uint8_gemm(unsigned char *snp_matrix, int snps, int indiv,
double *ans) {
/*
Computes the cross-product snp_matrix * snp_matrix^T of the uint8 SNP matrix
(indiv x snps) on the GPU via 8-bit integer GemmEx with float accumulation,
and stores the resulting indiv x indiv matrix as double in ans.
*/
hipblasStatus_t status = HIPBLAS_STATUS_SUCCESS;
hipError_t err = hipSuccess;
hipblasHandle_t handle;
hipblasGemmAlgo_t algo = HIPBLAS_GEMM_DEFAULT;
hipDataType input_type = HIP_R_8I;
hipDataType output_type = HIP_R_32F;
hipblasComputeType_t compute_type = HIPBLAS_COMPUTE_32F;
void *d_A = NULL,
*d_B = NULL;
float *d_C = NULL,
*h_C = NULL;
size_t nrowA = ((indiv - 1)/4 + 1) * 4,
ncolA = ((snps - 1)/4 + 1) * 4;
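// nrowA and ncolA round indiv and snps up to the next multiple of 4 (e.g. indiv = 10
// gives nrowA = 12), so the dimensions and leading dimensions handed to GemmEx stay
// multiples of 4, which the 8-bit integer GEMM path expects.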
debug_info("cuBLAS uint8: Problem size: (%ld, %ld).\n", nrowA, ncolA);
const float alpha = 1.0,
beta = 0.0;
size_t size_of_input = sizeof(uint8_t) * nrowA * ncolA;
size_t size_of_output = sizeof(float) * nrowA * nrowA;
if (checkDevMemory(2 * size_of_input + size_of_output) != 0) {
return 1;
}
// Create handle
hipblasCreate(&handle);
// Allocate memory
err = hipMalloc(&d_A, size_of_input);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = hipMalloc(&d_B, size_of_input);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = hipMalloc((void**)&d_C, size_of_output);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = hipHostMalloc((void**)&h_C, size_of_output);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
// Copy data to device
err = hipMemcpy2D(d_A, sizeof(unsigned char) * nrowA, snp_matrix,
sizeof(unsigned char) * indiv,
sizeof(unsigned char) * indiv,
sizeof(unsigned char) * snps, hipMemcpyHostToDevice);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = hipMemcpy2D(d_B, sizeof(unsigned char) * nrowA, snp_matrix,
sizeof(unsigned char) * indiv,
sizeof(unsigned char) * indiv,
sizeof(unsigned char) * snps, hipMemcpyHostToDevice);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
hipDeviceSynchronize();
if (checkError(__func__, __LINE__, hipGetLastError()) != 0)
return (1);
// Calculate GEMM
status =
hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, nrowA, nrowA, ncolA,
&alpha, d_A, input_type, nrowA, d_B, input_type, nrowA,
&beta, d_C, output_type, nrowA, compute_type, algo);
hipDeviceSynchronize();
if (checkError(__func__, __LINE__, status) != 0)
return (1);
if (checkError(__func__, __LINE__, hipGetLastError()) != 0)
return (1);
// Copy data back to host
err = hipMemcpy(h_C, d_C, size_of_output, hipMemcpyDeviceToHost);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
hipDeviceSynchronize();
// Cast to double
for (long i = 0; i < indiv; i++) {
for(long j = 0; j < indiv; j++){
ans[j + i * indiv] = (double)(h_C[j + i * nrowA]);
}
}
if (checkError(__func__, __LINE__, hipGetLastError()) != 0)
return (1);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipHostFree(h_C);
hipblasDestroy(handle);
return 0;
}
}
|
39beccbad4d22aff694cdd4f1cf8cfab7a016853.cu
|
/*
Alexander Freudenberg, [email protected]
Copyright (C) 2020-2023 Alexander Freudenberg
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cublas_v2.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
extern "C" {
int cublas_uint8_gemm(unsigned char *snp_matrix, int snps, int indiv,
double *ans) {
/*
Computes the cross-product snp_matrix * snp_matrix^T of the uint8 SNP matrix
(indiv x snps) on the GPU via 8-bit integer GemmEx with float accumulation,
and stores the resulting indiv x indiv matrix as double in ans.
*/
cublasStatus_t status = CUBLAS_STATUS_SUCCESS;
cudaError_t err = cudaSuccess;
cublasHandle_t handle;
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
cudaDataType_t input_type = CUDA_R_8I;
cudaDataType_t output_type = CUDA_R_32F;
cublasComputeType_t compute_type = CUBLAS_COMPUTE_32F;
void *d_A = NULL,
*d_B = NULL;
float *d_C = NULL,
*h_C = NULL;
size_t nrowA = ((indiv - 1)/4 + 1) * 4,
ncolA = ((snps - 1)/4 + 1) * 4;
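// nrowA and ncolA round indiv and snps up to the next multiple of 4 (e.g. indiv = 10
// gives nrowA = 12), so the dimensions and leading dimensions handed to GemmEx stay
// multiples of 4, which the 8-bit integer GEMM path expects.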
debug_info("cuBLAS uint8: Problem size: (%ld, %ld).\n", nrowA, ncolA);
const float alpha = 1.0,
beta = 0.0;
size_t size_of_input = sizeof(uint8_t) * nrowA * ncolA;
size_t size_of_output = sizeof(float) * nrowA * nrowA;
if (checkDevMemory(2 * size_of_input + size_of_output) != 0) {
return 1;
}
// Create handle
cublasCreate(&handle);
// Allocate memory
err = cudaMalloc(&d_A, size_of_input);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = cudaMalloc(&d_B, size_of_input);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = cudaMalloc((void**)&d_C, size_of_output);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = cudaMallocHost((void**)&h_C, size_of_output);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
// Copy data to device
err = cudaMemcpy2D(d_A, sizeof(unsigned char) * nrowA, snp_matrix,
sizeof(unsigned char) * indiv,
sizeof(unsigned char) * indiv,
sizeof(unsigned char) * snps, cudaMemcpyHostToDevice);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = cudaMemcpy2D(d_B, sizeof(unsigned char) * nrowA, snp_matrix,
sizeof(unsigned char) * indiv,
sizeof(unsigned char) * indiv,
sizeof(unsigned char) * snps, cudaMemcpyHostToDevice);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
cudaDeviceSynchronize();
if (checkError(__func__, __LINE__, cudaGetLastError()) != 0)
return (1);
// Calculate GEMM
status =
cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_T, nrowA, nrowA, ncolA,
&alpha, d_A, input_type, nrowA, d_B, input_type, nrowA,
&beta, d_C, output_type, nrowA, compute_type, algo);
cudaDeviceSynchronize();
if (checkError(__func__, __LINE__, status) != 0)
return (1);
if (checkError(__func__, __LINE__, cudaGetLastError()) != 0)
return (1);
// Copy data back to host
err = cudaMemcpy(h_C, d_C, size_of_output, cudaMemcpyDeviceToHost);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
cudaDeviceSynchronize();
// Cast to double
for (long i = 0; i < indiv; i++) {
for(long j = 0; j < indiv; j++){
ans[j + i * indiv] = (double)(h_C[j + i * nrowA]);
}
}
if (checkError(__func__, __LINE__, cudaGetLastError()) != 0)
return (1);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaFreeHost(h_C);
cublasDestroy(handle);
return 0;
}
}
|
3b8b8b489bf2ad9aa453dcdb4f0d019e11c13813.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/memory/transpose.h"
#include "cudakernel/common/divmod_fast.h"
#include "cudakernel/common/memory_utils.h"
#include "ppl/nn/common/tensor_shape.h"
#include "ppl/common/retcode.h"
#include "cudakernel/common/common.h"
#define DIM 32
#define MAX_DIM 65533
struct FastTransposeParam {
int64_t n_outer = 1;
int64_t n_height = 1;
int64_t n_width = 1;
int64_t n_inner = 1;
void reset() {
n_outer = 1;n_height = 1;n_width = 1;n_inner = 1;
}
};
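// The fast path below transposes the matrix in DIM x DIM tiles staged through shared
// memory; the tile is declared as [DIM][DIM + 1] so that the transposed (column-wise)
// reads after __syncthreads() hit different shared-memory banks - the usual padding
// trick for avoiding bank conflicts in tiled transposes.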
template<typename T>
__global__ void cuda_kernel_fast_trans(
const T* input,
FastTransposeParam param,
T* output)
{
__shared__ T share_val[DIM][DIM + 1];
int64_t num = blockIdx.z;
for (int n = num; n < param.n_outer; n+= gridDim.z) {
int64_t idx_w = blockIdx.x * blockDim.x + threadIdx.x;
int64_t idx_h = blockIdx.y * blockDim.y + threadIdx.y;
if (idx_w < param.n_width && idx_h < param.n_height) {
int64_t offset = n * param.n_height * param.n_width + idx_h * param.n_width + idx_w;
share_val[threadIdx.y][threadIdx.x] = input[offset];
} else {
share_val[threadIdx.y][threadIdx.x] = (T)0;
}
__syncthreads();
idx_w = blockIdx.y * blockDim.y + threadIdx.x;
idx_h = blockIdx.x * blockDim.x + threadIdx.y;
if (idx_w < param.n_height && idx_h < param.n_width) {
int64_t offset = n * param.n_height * param.n_width + idx_h * param.n_height + idx_w;
output[offset] = share_val[threadIdx.x][threadIdx.y];
}
}
}
bool FastTransposeSupport(
FastTransposeParam* fast_param,
const ppl::nn::TensorShape *input_shape,
ppl::nn::common::TransposeParam param,
const ppl::nn::TensorShape *output_shape)
{
if (input_shape->GetDataFormat() != ppl::common::DATAFORMAT_NDARRAY ||
output_shape->GetDataFormat() != ppl::common::DATAFORMAT_NDARRAY) {
return false;
}
int num_dims = input_shape->GetDimCount();
for (int i = 0; i < num_dims; i++) {
if (param.perm[i] == i) {
fast_param->n_outer *= input_shape->GetDim(i);
continue;
} else {
fast_param->n_height = input_shape->GetDim(i);
for (int j = i + 1; j < num_dims; j++) {
if (param.perm[j - 1] == j) {
fast_param->n_width *= input_shape->GetDim(j);
} else {
return false;
}
}
break;
}
}
return true;
}
ppl::common::RetCode PPLCUDATransposeFastForwardImp(
hipStream_t stream,
FastTransposeParam param,
const ppl::nn::TensorShape *input_shape,
const void *input,
const ppl::nn::TensorShape *output_shape,
void *output)
{
dim3 dim_block(DIM, DIM, 1);
int dimz = param.n_outer >= MAX_DIM ? MAX_DIM : param.n_outer;
dim3 dim_grid(DivUp(param.n_width, DIM), DivUp(param.n_height, DIM), dimz);
#define SWITCH_CASE(TYPE) \
case sizeof(TYPE): { \
hipLaunchKernelGGL(( cuda_kernel_fast_trans), dim3(dim_grid), dim3(dim_block), 0, stream, \
(const TYPE *)input, param, (TYPE *)output); \
return ppl::common::RC_SUCCESS; \
}
switch (ppl::common::GetSizeOfDataType(input_shape->GetDataType())) {
SWITCH_CASE(int8_t);
SWITCH_CASE(int16_t);
SWITCH_CASE(int32_t);
SWITCH_CASE(int64_t);
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef SWITCH_CASE
}
template<typename T>
__global__ void cuda_kernel_middle_trans(
const T* input,
int64_t num_elems,
FastTransposeParam param,
T* output)
{
int64_t tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= num_elems) return;
int inner_idx = tid % param.n_inner;
int width_idx = (tid / param.n_inner) % param.n_width;
int height_idx = (tid / (param.n_inner * param.n_width)) % param.n_height;
int outer_idx = tid / (param.n_inner * param.n_width * param.n_height);
int64_t offset = outer_idx * param.n_inner * param.n_width * param.n_height + height_idx * param.n_inner +
width_idx * param.n_height * param.n_inner + inner_idx;
output[offset] = input[tid];
}
bool MiddleFastTransposeSupport(
FastTransposeParam* fast_param,
const ppl::nn::TensorShape *input_shape,
ppl::nn::common::TransposeParam param,
const ppl::nn::TensorShape *output_shape)
{
if (input_shape->GetDataFormat() != ppl::common::DATAFORMAT_NDARRAY ||
output_shape->GetDataFormat() != ppl::common::DATAFORMAT_NDARRAY) {
return false;
}
fast_param->reset();
int num_dims = input_shape->GetDimCount();
int height_axis = 0;
int width_axis = num_dims - 1;
for (int i = 0; i < num_dims && param.perm[i] == i; fast_param->n_outer *= input_shape->GetDim(i), height_axis = i + 1, i++);
for (int i = num_dims - 1; i >= 0 && param.perm[i] == i; fast_param->n_inner *= input_shape->GetDim(i), width_axis = i - 1, i--);
if (width_axis <= height_axis) return false;
fast_param->n_height *= input_shape->GetDim(height_axis);
fast_param->n_width *= input_shape->GetDim(width_axis);
if (width_axis - height_axis != 1) return false;
return true;
}
ppl::common::RetCode PPLCUDATransposeMiddleFastForwardImp(
hipStream_t stream,
FastTransposeParam param,
const ppl::nn::TensorShape *input_shape,
const void *input,
const ppl::nn::TensorShape *output_shape,
void *output)
{
const int block_size = 256;
dim3 dim_block(block_size, 1, 1);
int64_t num_elems = output_shape->GetElementsIncludingPadding();
dim3 dim_grid(DivUp(num_elems, block_size), 1,1);
#define SWITCH_CASE(TYPE) \
case sizeof(TYPE): { \
hipLaunchKernelGGL(( cuda_kernel_middle_trans), dim3(dim_grid), dim3(dim_block), 0, stream, \
(const TYPE *)input, num_elems, param, (TYPE *)output); \
return ppl::common::RC_SUCCESS; \
}
switch (ppl::common::GetSizeOfDataType(input_shape->GetDataType())) {
SWITCH_CASE(int8_t);
SWITCH_CASE(int16_t);
SWITCH_CASE(int32_t);
SWITCH_CASE(int64_t);
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef SWITCH_CASE
}
template <typename T>
__global__ void ppl_cukernel_transpose(
int64_t num_elems,
int num_dims,
GArray<DivModFast> input_strides_fast,
GArray<int64_t> output_flip_strides,
const T* input,
T* output)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
int64_t output_offset = 0;
int idx, remain = index;
for (int it = 0; it < num_dims; ++it) {
input_strides_fast[it].divmod(remain, idx, remain);
output_offset += idx * output_flip_strides[it];
}
output[output_offset] = input[index];
}
template <typename T>
__global__ void ppl_cukernel_transpose_nhwc(
int64_t num_elems,
int num_dims,
GArray<DivModFast> input_strides_fast,
GArray<int64_t> input_strides,
GArray<int64_t> output_flip_strides,
const T *input,
T *output)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
int64_t input_offset = 0;
int64_t output_offset = 0;
int idx, remain = index;
for (int it = 0; it < num_dims; ++it) {
input_strides_fast[it].divmod(remain, idx, remain);
input_offset += idx * input_strides[it];
output_offset += idx * output_flip_strides[it];
}
output[output_offset] = input[input_offset];
}
ppl::common::RetCode PPLCUDATransposeForwardImp(
hipStream_t stream,
ppl::nn::common::TransposeParam param,
const ppl::nn::TensorShape* input_shape,
const void* input,
const ppl::nn::TensorShape* output_shape,
void* output)
{
FastTransposeParam fast_param;
if (FastTransposeSupport(&fast_param, input_shape, param, output_shape)) {
return PPLCUDATransposeFastForwardImp(stream, fast_param, input_shape, input, output_shape, output);
} else if (MiddleFastTransposeSupport(&fast_param, input_shape, param, output_shape)) {
return PPLCUDATransposeMiddleFastForwardImp(stream, fast_param, input_shape, input, output_shape, output);
}
int num_dims = output_shape->GetDimCount();
int64_t num_elems = output_shape->GetElementsExcludingPadding();
GArray<DivModFast> input_strides_fast(num_dims);
GArray<int64_t> input_strides(num_dims);
GArray<int64_t> output_strides(num_dims);
int64_t acc_output_stride = 1;
int64_t acc_input_stride = 1;
for (int it = num_dims - 1; it >= 0; --it) {
input_strides_fast[it] = DivModFast(acc_input_stride);
output_strides[it] = acc_output_stride;
acc_input_stride *= input_shape->GetDim(it);
acc_output_stride *= output_shape->GetDim(it);
}
if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC) {
acc_input_stride = 1;
acc_output_stride = 1;
for (int it = num_dims - 1; it >= 0; --it) {
if (it == num_dims - 1) {
input_strides[1] = acc_input_stride;
output_strides[1] = acc_output_stride;
acc_input_stride *= input_shape->GetDim(1) + input_shape->GetPadding0(1) + input_shape->GetPadding1(1);
acc_output_stride *= output_shape->GetDim(1) + output_shape->GetPadding0(1) + output_shape->GetPadding1(1);
} else if (it == 0) {
input_strides[it] = acc_input_stride;
output_strides[it] = acc_output_stride;
acc_input_stride *= input_shape->GetDim(it);
acc_output_stride *= output_shape->GetDim(it);
} else {
input_strides[it + 1] = acc_input_stride;
output_strides[it + 1] = acc_output_stride;
acc_input_stride *= input_shape->GetDim(it + 1);
acc_output_stride *= output_shape->GetDim(it + 1);
}
}
}
GArray<int64_t> output_flip_strides(num_dims);
for (int i = 0; i < num_dims; ++i) {
for (int j = 0; j < num_dims; ++j) {
if (param.perm[j] == i) {
output_flip_strides[i] = output_strides[j];
}
}
}
int block_size = 256;
int grid_size = (num_elems + block_size - 1) / block_size;
#define SWITCH_CASE(TYPE) \
case sizeof(TYPE): { \
if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC){ \
hipLaunchKernelGGL(( ppl_cukernel_transpose_nhwc), dim3(grid_size), dim3(block_size), 0, stream, \
num_elems, num_dims, input_strides_fast, input_strides, \
output_flip_strides, (const TYPE *)input, (TYPE *)output); \
} else { \
hipLaunchKernelGGL(( ppl_cukernel_transpose), dim3(grid_size), dim3(block_size), 0, stream, \
num_elems, num_dims, input_strides_fast, output_flip_strides, (const TYPE *)input, (TYPE *)output); \
} \
return ppl::common::RC_SUCCESS; \
} \
switch (ppl::common::GetSizeOfDataType(input_shape->GetDataType())) {
SWITCH_CASE(int8_t);
SWITCH_CASE(int16_t);
SWITCH_CASE(int32_t);
SWITCH_CASE(int64_t);
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef SWITCH_CASE
}
|
3b8b8b489bf2ad9aa453dcdb4f0d019e11c13813.cu
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/memory/transpose.h"
#include "cudakernel/common/divmod_fast.h"
#include "cudakernel/common/memory_utils.h"
#include "ppl/nn/common/tensor_shape.h"
#include "ppl/common/retcode.h"
#include "cudakernel/common/common.h"
#define DIM 32
#define MAX_DIM 65533
struct FastTransposeParam {
int64_t n_outer = 1;
int64_t n_height = 1;
int64_t n_width = 1;
int64_t n_inner = 1;
void reset() {
n_outer = 1;n_height = 1;n_width = 1;n_inner = 1;
}
};
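// The fast path below transposes the matrix in DIM x DIM tiles staged through shared
// memory; the tile is declared as [DIM][DIM + 1] so that the transposed (column-wise)
// reads after __syncthreads() hit different shared-memory banks - the usual padding
// trick for avoiding bank conflicts in tiled transposes.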
template<typename T>
__global__ void cuda_kernel_fast_trans(
const T* input,
FastTransposeParam param,
T* output)
{
__shared__ T share_val[DIM][DIM + 1];
int64_t num = blockIdx.z;
for (int n = num; n < param.n_outer; n+= gridDim.z) {
int64_t idx_w = blockIdx.x * blockDim.x + threadIdx.x;
int64_t idx_h = blockIdx.y * blockDim.y + threadIdx.y;
if (idx_w < param.n_width && idx_h < param.n_height) {
int64_t offset = n * param.n_height * param.n_width + idx_h * param.n_width + idx_w;
share_val[threadIdx.y][threadIdx.x] = input[offset];
} else {
share_val[threadIdx.y][threadIdx.x] = (T)0;
}
__syncthreads();
idx_w = blockIdx.y * blockDim.y + threadIdx.x;
idx_h = blockIdx.x * blockDim.x + threadIdx.y;
if (idx_w < param.n_height && idx_h < param.n_width) {
int64_t offset = n * param.n_height * param.n_width + idx_h * param.n_height + idx_w;
output[offset] = share_val[threadIdx.x][threadIdx.y];
}
}
}
bool FastTransposeSupport(
FastTransposeParam* fast_param,
const ppl::nn::TensorShape *input_shape,
ppl::nn::common::TransposeParam param,
const ppl::nn::TensorShape *output_shape)
{
if (input_shape->GetDataFormat() != ppl::common::DATAFORMAT_NDARRAY ||
output_shape->GetDataFormat() != ppl::common::DATAFORMAT_NDARRAY) {
return false;
}
int num_dims = input_shape->GetDimCount();
for (int i = 0; i < num_dims; i++) {
if (param.perm[i] == i) {
fast_param->n_outer *= input_shape->GetDim(i);
continue;
} else {
fast_param->n_height = input_shape->GetDim(i);
for (int j = i + 1; j < num_dims; j++) {
if (param.perm[j - 1] == j) {
fast_param->n_width *= input_shape->GetDim(j);
} else {
return false;
}
}
break;
}
}
return true;
}
ppl::common::RetCode PPLCUDATransposeFastForwardImp(
cudaStream_t stream,
FastTransposeParam param,
const ppl::nn::TensorShape *input_shape,
const void *input,
const ppl::nn::TensorShape *output_shape,
void *output)
{
dim3 dim_block(DIM, DIM, 1);
int dimz = param.n_outer >= MAX_DIM ? MAX_DIM : param.n_outer;
dim3 dim_grid(DivUp(param.n_width, DIM), DivUp(param.n_height, DIM), dimz);
#define SWITCH_CASE(TYPE) \
case sizeof(TYPE): { \
cuda_kernel_fast_trans<<<dim_grid, dim_block, 0, stream>>>( \
(const TYPE *)input, param, (TYPE *)output); \
return ppl::common::RC_SUCCESS; \
}
switch (ppl::common::GetSizeOfDataType(input_shape->GetDataType())) {
SWITCH_CASE(int8_t);
SWITCH_CASE(int16_t);
SWITCH_CASE(int32_t);
SWITCH_CASE(int64_t);
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef SWITCH_CASE
}
template<typename T>
__global__ void cuda_kernel_middle_trans(
const T* input,
int64_t num_elems,
FastTransposeParam param,
T* output)
{
int64_t tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= num_elems) return;
int inner_idx = tid % param.n_inner;
int width_idx = (tid / param.n_inner) % param.n_width;
int height_idx = (tid / (param.n_inner * param.n_width)) % param.n_height;
int outer_idx = tid / (param.n_inner * param.n_width * param.n_height);
int64_t offset = outer_idx * param.n_inner * param.n_width * param.n_height + height_idx * param.n_inner +
width_idx * param.n_height * param.n_inner + inner_idx;
output[offset] = input[tid];
}
bool MiddleFastTransposeSupport(
FastTransposeParam* fast_param,
const ppl::nn::TensorShape *input_shape,
ppl::nn::common::TransposeParam param,
const ppl::nn::TensorShape *output_shape)
{
if (input_shape->GetDataFormat() != ppl::common::DATAFORMAT_NDARRAY ||
output_shape->GetDataFormat() != ppl::common::DATAFORMAT_NDARRAY) {
return false;
}
fast_param->reset();
int num_dims = input_shape->GetDimCount();
int height_axis = 0;
int width_axis = num_dims - 1;
for (int i = 0; i < num_dims && param.perm[i] == i; fast_param->n_outer *= input_shape->GetDim(i), height_axis = i + 1, i++);
for (int i = num_dims - 1; i >= 0 && param.perm[i] == i; fast_param->n_inner *= input_shape->GetDim(i), width_axis = i - 1, i--);
if (width_axis <= height_axis) return false;
fast_param->n_height *= input_shape->GetDim(height_axis);
fast_param->n_width *= input_shape->GetDim(width_axis);
if (width_axis - height_axis != 1) return false;
return true;
}
ppl::common::RetCode PPLCUDATransposeMiddleFastForwardImp(
cudaStream_t stream,
FastTransposeParam param,
const ppl::nn::TensorShape *input_shape,
const void *input,
const ppl::nn::TensorShape *output_shape,
void *output)
{
const int block_size = 256;
dim3 dim_block(block_size, 1, 1);
int64_t num_elems = output_shape->GetElementsIncludingPadding();
dim3 dim_grid(DivUp(num_elems, block_size), 1,1);
#define SWITCH_CASE(TYPE) \
case sizeof(TYPE): { \
cuda_kernel_middle_trans<<<dim_grid, dim_block, 0, stream>>>( \
(const TYPE *)input, num_elems, param, (TYPE *)output); \
return ppl::common::RC_SUCCESS; \
}
switch (ppl::common::GetSizeOfDataType(input_shape->GetDataType())) {
SWITCH_CASE(int8_t);
SWITCH_CASE(int16_t);
SWITCH_CASE(int32_t);
SWITCH_CASE(int64_t);
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef SWITCH_CASE
}
template <typename T>
__global__ void ppl_cukernel_transpose(
int64_t num_elems,
int num_dims,
GArray<DivModFast> input_strides_fast,
GArray<int64_t> output_flip_strides,
const T* input,
T* output)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
int64_t output_offset = 0;
int idx, remain = index;
for (int it = 0; it < num_dims; ++it) {
input_strides_fast[it].divmod(remain, idx, remain);
output_offset += idx * output_flip_strides[it];
}
output[output_offset] = input[index];
}
template <typename T>
__global__ void ppl_cukernel_transpose_nhwc(
int64_t num_elems,
int num_dims,
GArray<DivModFast> input_strides_fast,
GArray<int64_t> input_strides,
GArray<int64_t> output_flip_strides,
const T *input,
T *output)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
int64_t input_offset = 0;
int64_t output_offset = 0;
int idx, remain = index;
for (int it = 0; it < num_dims; ++it) {
input_strides_fast[it].divmod(remain, idx, remain);
input_offset += idx * input_strides[it];
output_offset += idx * output_flip_strides[it];
}
output[output_offset] = input[input_offset];
}
ppl::common::RetCode PPLCUDATransposeForwardImp(
cudaStream_t stream,
ppl::nn::common::TransposeParam param,
const ppl::nn::TensorShape* input_shape,
const void* input,
const ppl::nn::TensorShape* output_shape,
void* output)
{
FastTransposeParam fast_param;
if (FastTransposeSupport(&fast_param, input_shape, param, output_shape)) {
return PPLCUDATransposeFastForwardImp(stream, fast_param, input_shape, input, output_shape, output);
} else if (MiddleFastTransposeSupport(&fast_param, input_shape, param, output_shape)) {
return PPLCUDATransposeMiddleFastForwardImp(stream, fast_param, input_shape, input, output_shape, output);
}
int num_dims = output_shape->GetDimCount();
int64_t num_elems = output_shape->GetElementsExcludingPadding();
GArray<DivModFast> input_strides_fast(num_dims);
GArray<int64_t> input_strides(num_dims);
GArray<int64_t> output_strides(num_dims);
int64_t acc_output_stride = 1;
int64_t acc_input_stride = 1;
for (int it = num_dims - 1; it >= 0; --it) {
input_strides_fast[it] = DivModFast(acc_input_stride);
output_strides[it] = acc_output_stride;
acc_input_stride *= input_shape->GetDim(it);
acc_output_stride *= output_shape->GetDim(it);
}
if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC) {
acc_input_stride = 1;
acc_output_stride = 1;
for (int it = num_dims - 1; it >= 0; --it) {
if (it == num_dims - 1) {
input_strides[1] = acc_input_stride;
output_strides[1] = acc_output_stride;
acc_input_stride *= input_shape->GetDim(1) + input_shape->GetPadding0(1) + input_shape->GetPadding1(1);
acc_output_stride *= output_shape->GetDim(1) + output_shape->GetPadding0(1) + output_shape->GetPadding1(1);
} else if (it == 0) {
input_strides[it] = acc_input_stride;
output_strides[it] = acc_output_stride;
acc_input_stride *= input_shape->GetDim(it);
acc_output_stride *= output_shape->GetDim(it);
} else {
input_strides[it + 1] = acc_input_stride;
output_strides[it + 1] = acc_output_stride;
acc_input_stride *= input_shape->GetDim(it + 1);
acc_output_stride *= output_shape->GetDim(it + 1);
}
}
}
GArray<int64_t> output_flip_strides(num_dims);
for (int i = 0; i < num_dims; ++i) {
for (int j = 0; j < num_dims; ++j) {
if (param.perm[j] == i) {
output_flip_strides[i] = output_strides[j];
}
}
}
int block_size = 256;
int grid_size = (num_elems + block_size - 1) / block_size;
#define SWITCH_CASE(TYPE) \
case sizeof(TYPE): { \
if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC){ \
ppl_cukernel_transpose_nhwc<<<grid_size, block_size, 0, stream>>>( \
num_elems, num_dims, input_strides_fast, input_strides, \
output_flip_strides, (const TYPE *)input, (TYPE *)output); \
} else { \
ppl_cukernel_transpose<<<grid_size, block_size, 0, stream>>>( \
num_elems, num_dims, input_strides_fast, output_flip_strides, (const TYPE *)input, (TYPE *)output); \
} \
return ppl::common::RC_SUCCESS; \
} \
switch (ppl::common::GetSizeOfDataType(input_shape->GetDataType())) {
SWITCH_CASE(int8_t);
SWITCH_CASE(int16_t);
SWITCH_CASE(int32_t);
SWITCH_CASE(int64_t);
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef SWITCH_CASE
}
|
e322c097116a98d34a2eeb070bb13e5dccd4c21f.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifdef USE_LEGACY_DSLASH
#include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <typeinfo>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <dslash.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <inline_ptx.h>
#include <dslash_policy.cuh>
namespace quda {
namespace asym_clover {
#undef GPU_STAGGERED_DIRAC
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
#ifdef GPU_CLOVER_DIRAC
#define DD_CLOVER 2
#include <wilson_dslash_def.h> // Wilson Dslash kernels (including clover)
#undef DD_CLOVER
#endif
#ifndef DSLASH_SHARED_FLOATS_PER_THREAD
#define DSLASH_SHARED_FLOATS_PER_THREAD 0
#endif
#include <dslash_quda.cuh>
} // end namespace asym_clover
using namespace asym_clover;
#ifdef GPU_CLOVER_DIRAC
template <typename sFloat, typename gFloat, typename cFloat>
class AsymCloverDslashCuda : public SharedDslashCuda {
protected:
const FullClover &clover;
unsigned int sharedBytesPerThread() const
{
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
}
public:
AsymCloverDslashCuda(cudaColorSpinorField *out, const GaugeField &gauge, const FullClover &clover,
const cudaColorSpinorField *in, const cudaColorSpinorField *x, const double a,
const int parity, const int dagger, const int *commOverride)
: SharedDslashCuda(out, in, x, gauge, parity, dagger, commOverride), clover(clover)
{
QudaPrecision clover_prec = bindCloverTex(clover, parity, dslashParam);
if (in->Precision() != clover_prec) errorQuda("Mixing clover and spinor precision not supported");
dslashParam.a = a;
dslashParam.a_f = a;
dslashParam.cl_stride = clover.stride;
dslashParam.rho = clover.rho;
dslashParam.rho_f = clover.rho;
if (!x) errorQuda("Asymmetric clover dslash only defined for Xpay");
}
virtual ~AsymCloverDslashCuda() {
unbindSpinorTex<sFloat>(in, out, x);
unbindCloverTex(clover);
}
void apply(const hipStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X) errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
#ifndef USE_TEXTURE_OBJECTS
if (dslashParam.kernel_type == INTERIOR_KERNEL) bindSpinorTex<sFloat>(in, out, x);
#endif // USE_TEXTURE_OBJECTS
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
setParam();
dslashParam.block[0] = tp.aux.x; dslashParam.block[1] = tp.aux.y; dslashParam.block[2] = tp.aux.z; dslashParam.block[3] = tp.aux.w;
for (int i=0; i<4; i++) dslashParam.grid[i] = ( (i==0 ? 2 : 1) * in->X(i)) / dslashParam.block[i];
ASYM_DSLASH(asymCloverDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam);
}
long long flops() const {
int clover_flops = 504;
long long flops = DslashCuda::flops();
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL:
break;
case INTERIOR_KERNEL:
case KERNEL_POLICY:
// clover flops are done in the interior kernel
flops += clover_flops * in->VolumeCB();
break;
}
return flops;
}
long long bytes() const {
bool isHalf = in->Precision() == sizeof(short) ? true : false;
int clover_bytes = 72 * in->Precision() + (isHalf ? 2*sizeof(float) : 0);
long long bytes = DslashCuda::bytes();
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL:
break;
case INTERIOR_KERNEL:
case KERNEL_POLICY:
bytes += clover_bytes*in->VolumeCB();
break;
}
return bytes;
}
};
#endif // GPU_CLOVER_DIRAC
void asymCloverDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover &clover,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &a, const int *commOverride,
TimeProfile &profile)
{
#ifdef GPU_CLOVER_DIRAC
const_cast<cudaColorSpinorField*>(in)->createComms(1);
DslashCuda *dslash = nullptr;
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
dslash = new AsymCloverDslashCuda<double2, double2, double2>(out, gauge, clover, in, x, a, parity, dagger, commOverride);
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new AsymCloverDslashCuda<float4, float4, float4>(out, gauge, clover, in, x, a, parity, dagger, commOverride);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new AsymCloverDslashCuda<short4, short4, short4>(out, gauge, clover, in, x, a, parity, dagger, commOverride);
}
dslash::DslashPolicyTune<DslashCuda> dslash_policy(
*dslash, const_cast<cudaColorSpinorField *>(in), in->Volume(), in->GhostFace(), profile);
dslash_policy.apply(0);
delete dslash;
#else
errorQuda("Clover dslash has not been built");
#endif
}
}
#endif
|
e322c097116a98d34a2eeb070bb13e5dccd4c21f.cu
|
#ifdef USE_LEGACY_DSLASH
#include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <typeinfo>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <dslash.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <inline_ptx.h>
#include <dslash_policy.cuh>
namespace quda {
namespace asym_clover {
#undef GPU_STAGGERED_DIRAC
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
#ifdef GPU_CLOVER_DIRAC
#define DD_CLOVER 2
#include <wilson_dslash_def.h> // Wilson Dslash kernels (including clover)
#undef DD_CLOVER
#endif
#ifndef DSLASH_SHARED_FLOATS_PER_THREAD
#define DSLASH_SHARED_FLOATS_PER_THREAD 0
#endif
#include <dslash_quda.cuh>
} // end namespace asym_clover
using namespace asym_clover;
#ifdef GPU_CLOVER_DIRAC
template <typename sFloat, typename gFloat, typename cFloat>
class AsymCloverDslashCuda : public SharedDslashCuda {
protected:
const FullClover &clover;
unsigned int sharedBytesPerThread() const
{
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
}
public:
AsymCloverDslashCuda(cudaColorSpinorField *out, const GaugeField &gauge, const FullClover &clover,
const cudaColorSpinorField *in, const cudaColorSpinorField *x, const double a,
const int parity, const int dagger, const int *commOverride)
: SharedDslashCuda(out, in, x, gauge, parity, dagger, commOverride), clover(clover)
{
QudaPrecision clover_prec = bindCloverTex(clover, parity, dslashParam);
if (in->Precision() != clover_prec) errorQuda("Mixing clover and spinor precision not supported");
dslashParam.a = a;
dslashParam.a_f = a;
dslashParam.cl_stride = clover.stride;
dslashParam.rho = clover.rho;
dslashParam.rho_f = clover.rho;
if (!x) errorQuda("Asymmetric clover dslash only defined for Xpay");
}
virtual ~AsymCloverDslashCuda() {
unbindSpinorTex<sFloat>(in, out, x);
unbindCloverTex(clover);
}
void apply(const cudaStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X) errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
#ifndef USE_TEXTURE_OBJECTS
if (dslashParam.kernel_type == INTERIOR_KERNEL) bindSpinorTex<sFloat>(in, out, x);
#endif // USE_TEXTURE_OBJECTS
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
setParam();
dslashParam.block[0] = tp.aux.x; dslashParam.block[1] = tp.aux.y; dslashParam.block[2] = tp.aux.z; dslashParam.block[3] = tp.aux.w;
for (int i=0; i<4; i++) dslashParam.grid[i] = ( (i==0 ? 2 : 1) * in->X(i)) / dslashParam.block[i];
ASYM_DSLASH(asymCloverDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam);
}
long long flops() const {
int clover_flops = 504;
long long flops = DslashCuda::flops();
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL:
break;
case INTERIOR_KERNEL:
case KERNEL_POLICY:
// clover flops are done in the interior kernel
flops += clover_flops * in->VolumeCB();
break;
}
return flops;
}
long long bytes() const {
bool isHalf = in->Precision() == sizeof(short) ? true : false;
int clover_bytes = 72 * in->Precision() + (isHalf ? 2*sizeof(float) : 0);
long long bytes = DslashCuda::bytes();
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL:
break;
case INTERIOR_KERNEL:
case KERNEL_POLICY:
bytes += clover_bytes*in->VolumeCB();
break;
}
return bytes;
}
};
#endif // GPU_CLOVER_DIRAC
void asymCloverDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover &clover,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &a, const int *commOverride,
TimeProfile &profile)
{
#ifdef GPU_CLOVER_DIRAC
const_cast<cudaColorSpinorField*>(in)->createComms(1);
DslashCuda *dslash = nullptr;
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
dslash = new AsymCloverDslashCuda<double2, double2, double2>(out, gauge, clover, in, x, a, parity, dagger, commOverride);
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new AsymCloverDslashCuda<float4, float4, float4>(out, gauge, clover, in, x, a, parity, dagger, commOverride);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new AsymCloverDslashCuda<short4, short4, short4>(out, gauge, clover, in, x, a, parity, dagger, commOverride);
}
dslash::DslashPolicyTune<DslashCuda> dslash_policy(
*dslash, const_cast<cudaColorSpinorField *>(in), in->Volume(), in->GhostFace(), profile);
dslash_policy.apply(0);
delete dslash;
#else
errorQuda("Clover dslash has not been built");
#endif
}
}
#endif
|
4aef56b12eb6d63c797cadfe6b12fe40e10aa745.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* \file lbmFlowAroundCylinder.cu
* \brief GPU (Cuda) and CPU version running the same code for floating point computation debugging...
* \author Adrien Python
* \date 22.01.2017
*/
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <math.h>
#include <stdbool.h>
#include <libgen.h>
#include <pgm.h>
#include <timing.h>
#define RE 220.0 // Reynolds number
#define NX 420 // Number of lattice nodes (width)
#define NY 180 // Number of lattice nodes (height)
#define LY ((NY) - 1) // Height of the domain in lattice units
#define CX ((NX) / 4) // X coordinates of the cylinder
#define CY ((NY) / 2) // Y coordinates of the cylinder
#define R ((NY) / 9) // Cylinder radius
#define ULB 0.04 // Velocity in lattice units
#define NULB ((ULB) * (R) / (RE)) // Viscosity in lattice units
#define OMEGA ((double)1. / (3*(NULB)+0.5)) // Relaxation parameter
#define NB_BLOCKS 1
//#define NB_THREADS 100
#define SQUARE(a) ((a)*(a))
#define GPU_SQUARE(a) (__dmul_rn(a,a))
#define INDEX_2D_FROM_1D(x, y, i) do { (y) = (i)/(NX), (x) = (i)%(NX); } while (0)
typedef enum { OUT_NONE, OUT_FIN, OUT_IMG } out_mode;
typedef struct {
bool obstacles[NX][NY]; // Should reside in lbm_consts but is too big for constant memory
double u[NX][NY][2];
double feq[NX][NY][9];
double fin[NX][NY][9];
double fout[NX][NY][9];
double rho[NX][NY];
double vel[NX][NY][2];
} lbm_vars;
typedef struct {
size_t col[3][3];
size_t opp[9];
ssize_t v[2][9];
double t[9];
} lbm_consts;
#ifdef COMPUTE_ON_CPU
// Tweak the code to run on CPU
#define hipMalloc(dst_ptr, size) do { *(dst_ptr) = (lbm_vars*)malloc(size); } while(0)
#define hipMemcpy(dst, src, size, mode) memcpy(dst, src, size)
#define hipMemcpyToSymbol(dst, src, size) memcpy(&dst, src, size)
#define hipFree(ptr) free(ptr)
#define HANDLE_ERROR(ans) ans
#define HANDLE_KERNEL_ERROR(...) do { __VA_ARGS__; } while(0)
#define fory(...) for (int y = 0; y < NY; ++y) { __VA_ARGS__; }
#define forxy(...) fory(for (int x = 0; x < NX; ++x) { __VA_ARGS__; })
#define RUN_KERNEL(kernel, th1, th2, ...) forxy(kernel(__VA_ARGS__, x, y))
#else
// Code for GPU usage only
#define HANDLE_ERROR(ans) (handleError((ans), __FILE__, __LINE__))
inline void handleError(hipError_t code, const char *file, int line)
{
if (code != hipSuccess) {
fprintf(stderr,"CUDA assert: %s %s %d\n", hipGetErrorString(code), file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_KERNEL_ERROR(...) \
do { \
__VA_ARGS__; \
HANDLE_ERROR( hipPeekAtLastError() ); \
HANDLE_ERROR( hipDeviceSynchronize() ); \
} while(0)
#define RUN_KERNEL(kernel, th1, th2, ...) HANDLE_KERNEL_ERROR(hipLaunchKernelGGL(( kernel), dim3(dimBlock), dim3(dimGrid), 0, 0, __VA_ARGS__) )
#endif
// Constants
#ifndef COMPUTE_ON_CPU
__constant__
#endif
lbm_consts d_consts;
const ssize_t V[][9] = {
{ 1, 1, 1, 0, 0, 0,-1,-1,-1 },
{ 1, 0,-1, 1, 0,-1, 1, 0,-1 }
};
const double T[] = { 1./36, 1./9, 1./36, 1./9, 4./9, 1./9, 1./36, 1./9, 1./36 };
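// V and T above are the standard D2Q9 lattice: nine discrete velocities with weights
// 4/9 for the rest direction, 1/9 for the four axis directions and 1/36 for the four
// diagonals.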
/**
* Setup: cylindrical obstacle and velocity inlet with perturbation
* Creation of a mask with boolean values, defining the shape of the obstacle.
*/
static void initObstacles(lbm_vars* vars)
{
for (int x = 0; x < NX; x++) {
for (int y = 0; y < NY; y++) {
vars->obstacles[x][y] = SQUARE(x-CX) + SQUARE(y-CY) < SQUARE(R);
}
}
}
/**
* Initial velocity profile: almost zero, with a slight perturbation to trigger
* the instability.
*/
static void initVelocity(lbm_vars* vars)
{
for (int d = 0; d < 2; d++) {
for (int x = 0; x < NX; x++) {
for (int y = 0; y < NY; y++) {
vars->vel[x][y][d] = (1-d) * ULB * (1 + 0.0001 * sin( y / (double)LY * 2 * M_PI) );
}
}
}
}
static void initRho(lbm_vars* vars)
{
for (int x = 0; x < NX; x++) {
for (int y = 0; y < NY; y++) {
vars->rho[x][y] = 1.0;
}
}
}
static void initCol(size_t* col, ssize_t v0)
{
for (int f = 0, i = 0; f < 9 && i < 3; f++) {
if (V[0][f] == v0) {
col[i++] = f;
}
}
}
static void initOpp(size_t* opp)
{
for (int f = 0; f < 9; f++) {
for (int g = 0; g < 9; g++) {
if (V[0][f] == -V[0][g] && V[1][f] == -V[1][g]) {
opp[f] = g;
break;
}
}
}
}
#define EQUILIBRIUM_BODY(v, t) \
do { \
double usqr = 3./2 * ( SQUARE(u[0]) + SQUARE(u[1]) ); \
\
for (int f = 0; f < 9; f++) { \
double cu = 3 * ( v[0][f] * u[0] + v[1][f] * u[1] ); \
feq[f] = rho * t[f] * ( 1 + cu + 0.5 * SQUARE(cu) - usqr ); \
} \
} while(0)
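/* Both EQUILIBRIUM_BODY and GPU_EQUILIBRIUM_BODY (below) implement the standard D2Q9 BGK
   equilibrium feq_f = rho * t_f * (1 + 3*(c_f.u) + 9/2*(c_f.u)^2 - 3/2*|u|^2),
   with cu = 3*(c_f.u) and usqr = 3/2*|u|^2; the GPU variant only swaps a few operations
   for explicitly rounded double-precision intrinsics. */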
#define GPU_EQUILIBRIUM_BODY(v, t) \
do { \
double usqr = __dmul_rn(3./2, __dadd_rn( GPU_SQUARE(u[0]), GPU_SQUARE(u[1]) )); \
\
for (int f = 0; f < 9; f++) { \
double cu = 3 * ( v[0][f] * u[0] + v[1][f] * u[1] ); \
feq[f] = rho * t[f] * ( 1 + cu + 0.5 * SQUARE(cu) - usqr ); \
} \
} while(0)
#ifndef COMPUTE_ON_CPU
__host__
#endif
static void h_equilibrium(double* feq, double rho, double* u)
{
EQUILIBRIUM_BODY(V, T);
}
#ifndef COMPUTE_ON_CPU
__device__
#endif
static void d_equilibrium(double* feq, double rho, double* u)
{
#if defined(COMPUTE_ON_CPU) || ! defined(USE_GPU_OPERATORS)
EQUILIBRIUM_BODY(d_consts.v, d_consts.t);
#else
GPU_EQUILIBRIUM_BODY(d_consts.v, d_consts.t);
#endif
}
#ifndef COMPUTE_ON_CPU
__device__
#endif
static void macroscopic(double* fin, double* rho, double* u)
{
*rho = u[0] = u[1] = 0;
for (int f = 0; f < 9; f++) {
*rho += fin[f];
u[0] += d_consts.v[0][f] * fin[f];
u[1] += d_consts.v[1][f] * fin[f];
}
u[0] /= *rho;
u[1] /= *rho;
}
#ifdef COMPUTE_ON_CPU
void lbm_computation(lbm_vars *d_vars, int x, int y)
{
#else
__global__ void lbm_computation(lbm_vars *d_vars)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < NX*NY; idx += blockDim.x * gridDim.x) {
int x, y;
INDEX_2D_FROM_1D(x, y, idx);
#endif
// Right wall: outflow condition.
if (x == NX-1) {
for (int i = 0; i < 3; i++) {
int f = d_consts.col[2][i];
d_vars->fin[NX-1][y][f] = d_vars->fin[NX-2][y][f];
}
}
// Compute macroscopic variables, density and velocity
macroscopic(d_vars->fin[x][y], &d_vars->rho[x][y], d_vars->u[x][y]);
if (x == 0) {
// Left wall: inflow condition
for (size_t d = 0; d < 2; d++) {
d_vars->u[0][y][d] = d_vars->vel[0][y][d];
}
// Calculate the density
double s2 = 0, s3 = 0;
for (size_t i = 0; i < 3; i++) {
s2 += d_vars->fin[0][y][d_consts.col[1][i]];
s3 += d_vars->fin[0][y][d_consts.col[2][i]];
}
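// Zou/He-type velocity inlet: the density follows from the known populations as rho = (s2 + 2*s3) / (1 - ux)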
d_vars->rho[0][y] = 1./(1 - d_vars->u[0][y][0]) * (s2 + 2*s3);
}
// Compute equilibrium
d_equilibrium(d_vars->feq[x][y], d_vars->rho[x][y], d_vars->u[x][y]);
if (x == 0) {
for (size_t i = 0; i < 3; i++) {
size_t f = d_consts.col[0][i];
d_vars->fin[0][y][f] = d_vars->feq[0][y][f] + d_vars->fin[0][y][d_consts.opp[f]] - d_vars->feq[0][y][d_consts.opp[f]];
}
}
for (size_t f = 0; f < 9; f++) {
if (d_vars->obstacles[x][y]) {
// Bounce-back condition for obstacle
d_vars->fout[x][y][f] = d_vars->fin[x][y][d_consts.opp[f]];
} else {
// Collision step
#if defined(COMPUTE_ON_CPU) || ! defined(USE_GPU_OPERATORS)
d_vars->fout[x][y][f] = d_vars->fin[x][y][f] - OMEGA * (d_vars->fin[x][y][f] - d_vars->feq[x][y][f]);
#else
d_vars->fout[x][y][f] = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(d_vars->fin[x][y][f], - d_vars->feq[x][y][f])), d_vars->fin[x][y][f]);
#endif
}
}
#ifndef COMPUTE_ON_CPU
}
#endif
}
#ifdef COMPUTE_ON_CPU
void lbm_streaming(lbm_vars *d_vars, int x, int y)
{
#else
__global__ void lbm_streaming(lbm_vars *d_vars)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < NX*NY; idx += blockDim.x * gridDim.x) {
int x, y;
INDEX_2D_FROM_1D(x, y, idx);
#endif
// Streaming step
for (size_t f = 0; f < 9; f++) {
size_t x_dst = (x + NX + d_consts.v[0][f]) % NX;
size_t y_dst = (y + NY + d_consts.v[1][f]) % NY;
d_vars->fin[x_dst][y_dst][f] = d_vars->fout[x][y][f];
}
#ifndef COMPUTE_ON_CPU
}
#endif
}
void output_variables(char* filename, double var[NX][NY][9])
{
FILE* file = fopen(filename, "w");
for (size_t x = 0; x < NX; x++) {
for (size_t y = 0; y < NY; y++) {
for (size_t f = 0; f < 9; ++f) {
fprintf(file, "%64.60f\n", var[x][y][f]);
}
}
}
fclose(file);
}
void output_image(char* filename, double u[NX][NY][2])
{
pgm_image* pgm = pgm_create(NX, NY);
for (size_t x = 0; x < NX; x++) {
for (size_t y = 0; y < NY; y++) {
double vel = sqrt( SQUARE(u[x][y][0]) + SQUARE(u[x][y][1]) );
int color = 255 * min(vel * 10, 1.0);
pgm_set_pixel(pgm, x, y, color);
}
}
pgm_write(pgm, filename);
pgm_destroy(pgm);
}
int getThreads(int width, int height)
{
int dev, threads;
hipDeviceProp_t prop;
HANDLE_ERROR( hipGetDevice(&dev) );
HANDLE_ERROR( hipGetDeviceProperties(&prop, dev) );
int maxThreads = min(prop.maxThreadsDim[0], prop.maxThreadsPerBlock);
#ifdef NB_THREADS
threads = NB_THREADS;
#else
threads = prop.maxThreadsDim[0];
#endif
if (threads > maxThreads)
threads = maxThreads;
return min(threads, width*height);
}
float get_lups(int lattices, int iterations, long ns_time_diff)
{
return lattices * iterations * 1000000000.0f / ns_time_diff;
}
int main(int argc, char * const argv[])
{
// Init options to default values
const char* out_path = ".";
const char* out_pref = "lbm";
out_mode out = OUT_NONE;
ssize_t max_iter = 0;
size_t out_interval = 0;
bool print_lups = false;
bool print_avg_lups = false;
// Read arguments
while (optind < argc) {
switch (getopt(argc, argv, "pfi:I:o:O:lL")) {
case 'p': { out = OUT_IMG; break; }
case 'f': { out = OUT_FIN; break; }
case 'i': { max_iter = strtol(optarg, NULL, 10); break; }
case 'I': { out_interval = strtol(optarg, NULL, 10); break; }
case 'o': { out_path = optarg; break; }
case 'O': { out_pref = optarg; break; }
case 'l': { print_lups = true; break; }
case 'L': { print_avg_lups = true; break; }
default : { goto usage; }
}
}
// check that execution mode is set (output images or fin values)
if (max_iter < 1) {
usage:
fprintf(stderr, "usage: %s (-p | -f) -i <iter> [-I <out_interval>] [-o <out_dir>] [-O <out_prefix>] [-l] [-L]\n", basename((char*)argv[0]));
fprintf(stderr, " -p : output pictures\n");
fprintf(stderr, " -f : output populations\n");
fprintf(stderr, " -i : number of iterations\n");
fprintf(stderr, " -I : output interval; (0 if only the last iteration output in required)\n");
fprintf(stderr, " -o : output file directory\n");
fprintf(stderr, " -O : output filename prefix\n");
fprintf(stderr, " -l : print lups at each output interval\n");
fprintf(stderr, " -L : print average lups at the end\n");
return EXIT_FAILURE;
}
if (out == OUT_NONE) {
fprintf(stderr, "No output mode specified.\n");
}
#if defined(COMPUTE_ON_GPU) && !defined(USE_GPU_OPERATORS)
fprintf(stderr, "Warning, this program version do not use GPU operators. Results may differ from the CPU version of the program.\n");
#endif
lbm_consts* h_consts = (lbm_consts*)malloc(sizeof(lbm_consts));
initCol(h_consts->col[0], 1);
initCol(h_consts->col[1], 0);
initCol(h_consts->col[2], -1);
initOpp(h_consts->opp);
memcpy(h_consts->v, V, sizeof(V));
memcpy(h_consts->t, T, sizeof(T));
HANDLE_ERROR(hipMemcpyToSymbol(d_consts, h_consts, sizeof(lbm_consts)));
lbm_vars *h_vars = (lbm_vars*)malloc(sizeof(lbm_vars));
initObstacles(h_vars);
initVelocity(h_vars);
initRho(h_vars);
// Initialization of the populations at equilibrium with the given velocity.
for (int y = 0; y < NY; y++) {
for (int x = 0; x < NX; x++) {
h_equilibrium(h_vars->fin[x][y], h_vars->rho[x][y], h_vars->vel[x][y]);
}
}
lbm_vars *d_vars;
HANDLE_ERROR(hipMalloc(&d_vars, sizeof(lbm_vars)));
HANDLE_ERROR(hipMemcpy(d_vars, h_vars, sizeof(lbm_vars), hipMemcpyHostToDevice));
#ifndef COMPUTE_ON_CPU
dim3 dimBlock(NB_BLOCKS);
dim3 dimGrid(getThreads(NX, NY));
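// Note: dimBlock holds the grid size (NB_BLOCKS blocks) and dimGrid the block size (threads per block); RUN_KERNEL passes them to the launch in that order.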
#endif
long time_diff, total_time_diff = 0;
start_time_t start_time;
timing_start(&start_time);
for (int iter = 1; iter <= max_iter; iter++) {
RUN_KERNEL(lbm_computation, NX, NY, d_vars);
RUN_KERNEL(lbm_streaming, NX, NY, d_vars);
if ( (!out_interval && iter == max_iter) || (out_interval && iter % out_interval == 0) ) {
total_time_diff += time_diff = timing_stop(&start_time);
if ( print_lups ) {
size_t iter_diff = out_interval? out_interval : (size_t)max_iter;
printf("lups: %.2f\n", get_lups(NX*NY, iter_diff, time_diff));
fflush(stdout);
}
HANDLE_ERROR(hipMemcpy(h_vars, d_vars, sizeof(lbm_vars), hipMemcpyDeviceToHost));
char* filename;
if ( out == OUT_IMG ) {
if ( asprintf(&filename, "%s/%s%d.pgm", out_path, out_pref, iter) != -1) {
output_image(filename, h_vars->u);
free(filename);
}
}
if (out == OUT_FIN) {
if ( asprintf(&filename, "%s/%s%d.out", out_path, out_pref, iter) != -1 ) {
output_variables(filename, h_vars->fin);
free(filename);
}
}
timing_start(&start_time);
}
}
if ( print_avg_lups ) {
printf("average lups: %.2f\n", get_lups(NX*NY, max_iter, total_time_diff));
}
free(h_consts);
free(h_vars);
HANDLE_ERROR(hipFree(d_vars));
return EXIT_SUCCESS;
}
|
4aef56b12eb6d63c797cadfe6b12fe40e10aa745.cu
|
/*!
* \file lbmFlowAroundCylinder.cu
 * \brief GPU (CUDA) and CPU versions running the same code, for debugging floating-point computation differences.
* \author Adrien Python
* \date 22.01.2017
*/
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <math.h>
#include <stdbool.h>
#include <libgen.h>
#include <pgm.h>
#include <timing.h>
#define RE 220.0 // Reynolds number
#define NX 420 // Number of lattice nodes (width)
#define NY 180 // Number of lattice nodes (height)
#define LY ((NY) - 1) // Height of the domain in lattice units
#define CX ((NX) / 4) // X coordinates of the cylinder
#define CY ((NY) / 2) // Y coordinates of the cylinder
#define R ((NY) / 9) // Cylinder radius
#define ULB 0.04 // Velocity in lattice units
#define NULB ((ULB) * (R) / (RE)) // Viscosity in lattice units
#define OMEGA ((double)1. / (3*(NULB)+0.5)) // Relaxation parameter
#define NB_BLOCKS 1
//#define NB_THREADS 100
#define SQUARE(a) ((a)*(a))
#define GPU_SQUARE(a) (__dmul_rn(a,a))
#define INDEX_2D_FROM_1D(x, y, i) do { (y) = (i)/(NX), (x) = (i)%(NX); } while (0)
typedef enum { OUT_NONE, OUT_FIN, OUT_IMG } out_mode;
typedef struct {
bool obstacles[NX][NY]; // Should reside in lbm_consts but is too big for constant memory
double u[NX][NY][2];
double feq[NX][NY][9];
double fin[NX][NY][9];
double fout[NX][NY][9];
double rho[NX][NY];
double vel[NX][NY][2];
} lbm_vars;
typedef struct {
size_t col[3][3];
size_t opp[9];
ssize_t v[2][9];
double t[9];
} lbm_consts;
#ifdef COMPUTE_ON_CPU
// Tweak the code to run on CPU
#define cudaMalloc(dst_ptr, size) do { *(dst_ptr) = (lbm_vars*)malloc(size); } while(0)
#define cudaMemcpy(dst, src, size, mode) memcpy(dst, src, size)
#define cudaMemcpyToSymbol(dst, src, size) memcpy(&dst, src, size)
#define cudaFree(ptr) free(ptr)
#define HANDLE_ERROR(ans) ans
#define HANDLE_KERNEL_ERROR(...) do { __VA_ARGS__; } while(0)
#define fory(...) for (int y = 0; y < NY; ++y) { __VA_ARGS__; }
#define forxy(...) fory(for (int x = 0; x < NX; ++x) { __VA_ARGS__; })
#define RUN_KERNEL(kernel, th1, th2, ...) forxy(kernel(__VA_ARGS__, x, y))
#else
// Code for GPU usage only
#define HANDLE_ERROR(ans) (handleError((ans), __FILE__, __LINE__))
inline void handleError(cudaError_t code, const char *file, int line)
{
if (code != cudaSuccess) {
fprintf(stderr,"CUDA assert: %s %s %d\n", cudaGetErrorString(code), file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_KERNEL_ERROR(...) \
do { \
__VA_ARGS__; \
HANDLE_ERROR( cudaPeekAtLastError() ); \
HANDLE_ERROR( cudaDeviceSynchronize() ); \
} while(0)
#define RUN_KERNEL(kernel, th1, th2, ...) HANDLE_KERNEL_ERROR( kernel<<<dimBlock, dimGrid>>>(__VA_ARGS__) )
#endif
// Constants
#ifndef COMPUTE_ON_CPU
__constant__
#endif
lbm_consts d_consts;
const ssize_t V[][9] = {
{ 1, 1, 1, 0, 0, 0,-1,-1,-1 },
{ 1, 0,-1, 1, 0,-1, 1, 0,-1 }
};
const double T[] = { 1./36, 1./9, 1./36, 1./9, 4./9, 1./9, 1./36, 1./9, 1./36 };
/**
* Setup: cylindrical obstacle and velocity inlet with perturbation
* Creation of a mask with boolean values, defining the shape of the obstacle.
*/
static void initObstacles(lbm_vars* vars)
{
for (int x = 0; x < NX; x++) {
for (int y = 0; y < NY; y++) {
vars->obstacles[x][y] = SQUARE(x-CX) + SQUARE(y-CY) < SQUARE(R);
}
}
}
/**
* Initial velocity profile: almost zero, with a slight perturbation to trigger
* the instability.
*/
static void initVelocity(lbm_vars* vars)
{
for (int d = 0; d < 2; d++) {
for (int x = 0; x < NX; x++) {
for (int y = 0; y < NY; y++) {
vars->vel[x][y][d] = (1-d) * ULB * (1 + 0.0001 * sin( y / (double)LY * 2 * M_PI) );
}
}
}
}
static void initRho(lbm_vars* vars)
{
for (int x = 0; x < NX; x++) {
for (int y = 0; y < NY; y++) {
vars->rho[x][y] = 1.0;
}
}
}
static void initCol(size_t* col, ssize_t v0)
{
for (int f = 0, i = 0; f < 9 && i < 3; f++) {
if (V[0][f] == v0) {
col[i++] = f;
}
}
}
static void initOpp(size_t* opp)
{
for (int f = 0; f < 9; f++) {
for (int g = 0; g < 9; g++) {
if (V[0][f] == -V[0][g] && V[1][f] == -V[1][g]) {
opp[f] = g;
break;
}
}
}
}
#define EQUILIBRIUM_BODY(v, t) \
do { \
double usqr = 3./2 * ( SQUARE(u[0]) + SQUARE(u[1]) ); \
\
for (int f = 0; f < 9; f++) { \
double cu = 3 * ( v[0][f] * u[0] + v[1][f] * u[1] ); \
feq[f] = rho * t[f] * ( 1 + cu + 0.5 * SQUARE(cu) - usqr ); \
} \
} while(0)
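/* Both EQUILIBRIUM_BODY and GPU_EQUILIBRIUM_BODY (below) implement the standard D2Q9 BGK
   equilibrium feq_f = rho * t_f * (1 + 3*(c_f.u) + 9/2*(c_f.u)^2 - 3/2*|u|^2),
   with cu = 3*(c_f.u) and usqr = 3/2*|u|^2; the GPU variant only swaps a few operations
   for explicitly rounded double-precision intrinsics. */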
#define GPU_EQUILIBRIUM_BODY(v, t) \
do { \
double usqr = __dmul_rn(3./2, __dadd_rn( GPU_SQUARE(u[0]), GPU_SQUARE(u[1]) )); \
\
for (int f = 0; f < 9; f++) { \
double cu = 3 * ( v[0][f] * u[0] + v[1][f] * u[1] ); \
feq[f] = rho * t[f] * ( 1 + cu + 0.5 * SQUARE(cu) - usqr ); \
} \
} while(0)
#ifndef COMPUTE_ON_CPU
__host__
#endif
static void h_equilibrium(double* feq, double rho, double* u)
{
EQUILIBRIUM_BODY(V, T);
}
#ifndef COMPUTE_ON_CPU
__device__
#endif
static void d_equilibrium(double* feq, double rho, double* u)
{
#if defined(COMPUTE_ON_CPU) || ! defined(USE_GPU_OPERATORS)
EQUILIBRIUM_BODY(d_consts.v, d_consts.t);
#else
GPU_EQUILIBRIUM_BODY(d_consts.v, d_consts.t);
#endif
}
#ifndef COMPUTE_ON_CPU
__device__
#endif
static void macroscopic(double* fin, double* rho, double* u)
{
*rho = u[0] = u[1] = 0;
for (int f = 0; f < 9; f++) {
*rho += fin[f];
u[0] += d_consts.v[0][f] * fin[f];
u[1] += d_consts.v[1][f] * fin[f];
}
u[0] /= *rho;
u[1] /= *rho;
}
#ifdef COMPUTE_ON_CPU
void lbm_computation(lbm_vars *d_vars, int x, int y)
{
#else
__global__ void lbm_computation(lbm_vars *d_vars)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < NX*NY; idx += blockDim.x * gridDim.x) {
int x, y;
INDEX_2D_FROM_1D(x, y, idx);
#endif
// Right wall: outflow condition.
if (x == NX-1) {
for (int i = 0; i < 3; i++) {
int f = d_consts.col[2][i];
d_vars->fin[NX-1][y][f] = d_vars->fin[NX-2][y][f];
}
}
// Compute macroscopic variables, density and velocity
macroscopic(d_vars->fin[x][y], &d_vars->rho[x][y], d_vars->u[x][y]);
if (x == 0) {
// Left wall: inflow condition
for (size_t d = 0; d < 2; d++) {
d_vars->u[0][y][d] = d_vars->vel[0][y][d];
}
// Calculate the density
double s2 = 0, s3 = 0;
for (size_t i = 0; i < 3; i++) {
s2 += d_vars->fin[0][y][d_consts.col[1][i]];
s3 += d_vars->fin[0][y][d_consts.col[2][i]];
}
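// Zou/He-type velocity inlet: the density follows from the known populations as rho = (s2 + 2*s3) / (1 - ux)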
d_vars->rho[0][y] = 1./(1 - d_vars->u[0][y][0]) * (s2 + 2*s3);
}
// Compute equilibrium
d_equilibrium(d_vars->feq[x][y], d_vars->rho[x][y], d_vars->u[x][y]);
if (x == 0) {
for (size_t i = 0; i < 3; i++) {
size_t f = d_consts.col[0][i];
d_vars->fin[0][y][f] = d_vars->feq[0][y][f] + d_vars->fin[0][y][d_consts.opp[f]] - d_vars->feq[0][y][d_consts.opp[f]];
}
}
for (size_t f = 0; f < 9; f++) {
if (d_vars->obstacles[x][y]) {
// Bounce-back condition for obstacle
d_vars->fout[x][y][f] = d_vars->fin[x][y][d_consts.opp[f]];
} else {
// Collision step
#if defined(COMPUTE_ON_CPU) || ! defined(USE_GPU_OPERATORS)
d_vars->fout[x][y][f] = d_vars->fin[x][y][f] - OMEGA * (d_vars->fin[x][y][f] - d_vars->feq[x][y][f]);
#else
d_vars->fout[x][y][f] = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(d_vars->fin[x][y][f], - d_vars->feq[x][y][f])), d_vars->fin[x][y][f]);
#endif
}
}
#ifndef COMPUTE_ON_CPU
}
#endif
}
#ifdef COMPUTE_ON_CPU
void lbm_streaming(lbm_vars *d_vars, int x, int y)
{
#else
__global__ void lbm_streaming(lbm_vars *d_vars)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < NX*NY; idx += blockDim.x * gridDim.x) {
int x, y;
INDEX_2D_FROM_1D(x, y, idx);
#endif
// Streaming step
for (size_t f = 0; f < 9; f++) {
size_t x_dst = (x + NX + d_consts.v[0][f]) % NX;
size_t y_dst = (y + NY + d_consts.v[1][f]) % NY;
d_vars->fin[x_dst][y_dst][f] = d_vars->fout[x][y][f];
}
#ifndef COMPUTE_ON_CPU
}
#endif
}
void output_variables(char* filename, double var[NX][NY][9])
{
FILE* file = fopen(filename, "w");
for (size_t x = 0; x < NX; x++) {
for (size_t y = 0; y < NY; y++) {
for (size_t f = 0; f < 9; ++f) {
fprintf(file, "%64.60f\n", var[x][y][f]);
}
}
}
fclose(file);
}
void output_image(char* filename, double u[NX][NY][2])
{
pgm_image* pgm = pgm_create(NX, NY);
for (size_t x = 0; x < NX; x++) {
for (size_t y = 0; y < NY; y++) {
double vel = sqrt( SQUARE(u[x][y][0]) + SQUARE(u[x][y][1]) );
int color = 255 * min(vel * 10, 1.0);
pgm_set_pixel(pgm, x, y, color);
}
}
pgm_write(pgm, filename);
pgm_destroy(pgm);
}
int getThreads(int width, int height)
{
int dev, threads;
cudaDeviceProp prop;
HANDLE_ERROR( cudaGetDevice(&dev) );
HANDLE_ERROR( cudaGetDeviceProperties(&prop, dev) );
int maxThreads = min(prop.maxThreadsDim[0], prop.maxThreadsPerBlock);
#ifdef NB_THREADS
threads = NB_THREADS;
#else
threads = prop.maxThreadsDim[0];
#endif
if (threads > maxThreads)
threads = maxThreads;
return min(threads, width*height);
}
float get_lups(int lattices, int iterations, long ns_time_diff)
{
return lattices * iterations * 1000000000.0f / ns_time_diff;
}
int main(int argc, char * const argv[])
{
// Init options to default values
const char* out_path = ".";
const char* out_pref = "lbm";
out_mode out = OUT_NONE;
ssize_t max_iter = 0;
size_t out_interval = 0;
bool print_lups = false;
bool print_avg_lups = false;
// Read arguments
while (optind < argc) {
switch (getopt(argc, argv, "pfi:I:o:O:lL")) {
case 'p': { out = OUT_IMG; break; }
case 'f': { out = OUT_FIN; break; }
case 'i': { max_iter = strtol(optarg, NULL, 10); break; }
case 'I': { out_interval = strtol(optarg, NULL, 10); break; }
case 'o': { out_path = optarg; break; }
case 'O': { out_pref = optarg; break; }
case 'l': { print_lups = true; break; }
case 'L': { print_avg_lups = true; break; }
default : { goto usage; }
}
}
// check that execution mode is set (output images or fin values)
if (max_iter < 1) {
usage:
fprintf(stderr, "usage: %s (-p | -f) -i <iter> [-I <out_interval>] [-o <out_dir>] [-O <out_prefix>] [-l] [-L]\n", basename((char*)argv[0]));
fprintf(stderr, " -p : output pictures\n");
fprintf(stderr, " -f : output populations\n");
fprintf(stderr, " -i : number of iterations\n");
fprintf(stderr, " -I : output interval; (0 if only the last iteration output in required)\n");
fprintf(stderr, " -o : output file directory\n");
fprintf(stderr, " -O : output filename prefix\n");
fprintf(stderr, " -l : print lups at each output interval\n");
fprintf(stderr, " -L : print average lups at the end\n");
return EXIT_FAILURE;
}
if (out == OUT_NONE) {
fprintf(stderr, "No output mode specified.\n");
}
#if defined(COMPUTE_ON_GPU) && !defined(USE_GPU_OPERATORS)
fprintf(stderr, "Warning, this program version do not use GPU operators. Results may differ from the CPU version of the program.\n");
#endif
lbm_consts* h_consts = (lbm_consts*)malloc(sizeof(lbm_consts));
initCol(h_consts->col[0], 1);
initCol(h_consts->col[1], 0);
initCol(h_consts->col[2], -1);
initOpp(h_consts->opp);
memcpy(h_consts->v, V, sizeof(V));
memcpy(h_consts->t, T, sizeof(T));
HANDLE_ERROR(cudaMemcpyToSymbol(d_consts, h_consts, sizeof(lbm_consts)));
lbm_vars *h_vars = (lbm_vars*)malloc(sizeof(lbm_vars));
initObstacles(h_vars);
initVelocity(h_vars);
initRho(h_vars);
// Initialization of the populations at equilibrium with the given velocity.
for (int y = 0; y < NY; y++) {
for (int x = 0; x < NX; x++) {
h_equilibrium(h_vars->fin[x][y], h_vars->rho[x][y], h_vars->vel[x][y]);
}
}
lbm_vars *d_vars;
HANDLE_ERROR(cudaMalloc(&d_vars, sizeof(lbm_vars)));
HANDLE_ERROR(cudaMemcpy(d_vars, h_vars, sizeof(lbm_vars), cudaMemcpyHostToDevice));
#ifndef COMPUTE_ON_CPU
dim3 dimBlock(NB_BLOCKS);
dim3 dimGrid(getThreads(NX, NY));
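// Note: dimBlock holds the grid size (NB_BLOCKS blocks) and dimGrid the block size (threads per block); RUN_KERNEL passes them to the launch in that order.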
#endif
long time_diff, total_time_diff = 0;
start_time_t start_time;
timing_start(&start_time);
for (int iter = 1; iter <= max_iter; iter++) {
RUN_KERNEL(lbm_computation, NX, NY, d_vars);
RUN_KERNEL(lbm_streaming, NX, NY, d_vars);
if ( (!out_interval && iter == max_iter) || (out_interval && iter % out_interval == 0) ) {
total_time_diff += time_diff = timing_stop(&start_time);
if ( print_lups ) {
size_t iter_diff = out_interval? out_interval : (size_t)max_iter;
printf("lups: %.2f\n", get_lups(NX*NY, iter_diff, time_diff));
fflush(stdout);
}
HANDLE_ERROR(cudaMemcpy(h_vars, d_vars, sizeof(lbm_vars), cudaMemcpyDeviceToHost));
char* filename;
if ( out == OUT_IMG ) {
if ( asprintf(&filename, "%s/%s%d.pgm", out_path, out_pref, iter) != -1) {
output_image(filename, h_vars->u);
free(filename);
}
}
if (out == OUT_FIN) {
if ( asprintf(&filename, "%s/%s%d.out", out_path, out_pref, iter) != -1 ) {
output_variables(filename, h_vars->fin);
free(filename);
}
}
timing_start(&start_time);
}
}
if ( print_avg_lups ) {
printf("average lups: %.2f\n", get_lups(NX*NY, max_iter, total_time_diff));
}
free(h_consts);
free(h_vars);
HANDLE_ERROR(cudaFree(d_vars));
return EXIT_SUCCESS;
}
|
7a8fd04e777dee815227bfe16da970d790187c9a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/round_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void round_kernel(const int nthreads, const Dtype* const bottom_data,
const int num, const int channels, const int height, const int width, Dtype* const top_data, bool flag) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Flag selects between the plain ZERO_ONE round and ONE_MINUS_ONE
if (flag)
{
top_data[index] = bottom_data[index]>0.5 ? 1 : 0;
}
else
{
top_data[index] = bottom_data[index]>0 ? 1 : -1;
}
}
}
template <typename Dtype>
__global__ void round_mul_kernel(const int nthreads, const Dtype* const bottom_data,
const Dtype* const hash, Dtype* const top_data, int grp) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype tmp;
if (bottom_data[index] < 0){
tmp = 0;
}
else if (bottom_data[index] > 1){
tmp = 1;
}
else{
tmp = bottom_data[index];
}
top_data[index] = hash[int(tmp*grp)];
}
}
template <typename Dtype>
void RoundLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* const top_data = top[0]->mutable_gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
//const Dtype* const mod = model.gpu_data();
int count = bottom[0]->count();
if (mult){
round_mul_kernel<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >
(count, bottom_data, hash_.gpu_data(), top_data, groups);
}
else{
round_kernel<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >
(count, bottom_data, num_, ch_, h_, w_, top_data, zero_one);
}
CUDA_POST_KERNEL_CHECK;
//LOG(INFO) << "forward";
}
template <typename Dtype>
__global__ void round_kernel_backward(const int nthreads,const Dtype* const top_diff,
const Dtype* const bottom_data, Dtype* const bottom_diff,bool flag) {
CUDA_KERNEL_LOOP(index, nthreads) {
if (flag){
if (bottom_data[index] >= 0 && bottom_data[index] <= 1)
bottom_diff[index] = top_diff[index];
else if (bottom_data[index]<0 && top_diff[index]<0)
bottom_diff[index] = top_diff[index];
else if (bottom_data[index]>1 && top_diff[index]>0)
bottom_diff[index] = top_diff[index];
else
bottom_diff[index] = 0;
}
else{
if (bottom_data[index] >= 0 && bottom_data[index] <= 1)
bottom_diff[index] = top_diff[index];
else if (bottom_data[index]<-1 && top_diff[index]<0)
bottom_diff[index] = top_diff[index];
else if (bottom_data[index]>1 && top_diff[index]>0)
bottom_diff[index] = top_diff[index];
else
bottom_diff[index] = 0;
}
}
}
template <typename Dtype>
void RoundLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* const top_diff = top[0]->gpu_diff();
Dtype* const bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* const bottom_data = bottom[0]->gpu_data();
int count = bottom[0]->count();
round_kernel_backward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >
(count, top_diff, bottom_data, bottom_diff, zero_one);
//LOG(INFO) << "1";
CUDA_POST_KERNEL_CHECK;
Dtype sum_ratio;
//LOG(INFO) << weight_;
caffe_gpu_asum(top[0]->count(), top[0]->gpu_data(), &sum_ratio);
this->blobs_[0]->mutable_cpu_data()[0] = sum_ratio / top[0]->count();
if (sum_ratio>ratio_*top[0]->count())
{
caffe_gpu_add_scalar(bottom[0]->count(), scale, bottom[0]->mutable_gpu_diff());
}
//LOG(INFO) << "backward";
}
INSTANTIATE_LAYER_GPU_FUNCS(RoundLayer);
} // namespace caffe
|
7a8fd04e777dee815227bfe16da970d790187c9a.cu
|
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/round_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void round_kernel(const int nthreads, const Dtype* const bottom_data,
const int num, const int channels, const int height, const int width, Dtype* const top_data, bool flag) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Flag indicates whether this is a plain ZERO_ONE round or ONE_MINUS_ONE
if (flag)
{
top_data[index] = bottom_data[index]>0.5 ? 1 : 0;
}
else
{
top_data[index] = bottom_data[index]>0 ? 1 : -1;
}
}
}
template <typename Dtype>
__global__ void round_mul_kernel(const int nthreads, const Dtype* const bottom_data,
const Dtype* const hash, Dtype* const top_data, int grp) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype tmp;
if (bottom_data[index] < 0){
tmp = 0;
}
else if (bottom_data[index] > 1){
tmp = 1;
}
else{
tmp = bottom_data[index];
}
top_data[index] = hash[int(tmp*grp)];
}
}
template <typename Dtype>
void RoundLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* const top_data = top[0]->mutable_gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
//const Dtype* const mod = model.gpu_data();
int count = bottom[0]->count();
if (mult){
round_mul_kernel<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >
(count, bottom_data, hash_.gpu_data(), top_data, groups);
}
else{
round_kernel<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >
(count, bottom_data, num_, ch_, h_, w_, top_data, zero_one);
}
CUDA_POST_KERNEL_CHECK;
//LOG(INFO) << "forward";
}
template <typename Dtype>
__global__ void round_kernel_backward(const int nthreads,const Dtype* const top_diff,
const Dtype* const bottom_data, Dtype* const bottom_diff,bool flag) {
CUDA_KERNEL_LOOP(index, nthreads) {
if (flag){
if (bottom_data[index] >= 0 && bottom_data[index] <= 1)
bottom_diff[index] = top_diff[index];
else if (bottom_data[index]<0 && top_diff[index]<0)
bottom_diff[index] = top_diff[index];
else if (bottom_data[index]>1 && top_diff[index]>0)
bottom_diff[index] = top_diff[index];
else
bottom_diff[index] = 0;
}
else{
if (bottom_data[index] >= 0 && bottom_data[index] <= 1)
bottom_diff[index] = top_diff[index];
else if (bottom_data[index]<-1 && top_diff[index]<0)
bottom_diff[index] = top_diff[index];
else if (bottom_data[index]>1 && top_diff[index]>0)
bottom_diff[index] = top_diff[index];
else
bottom_diff[index] = 0;
}
}
}
template <typename Dtype>
void RoundLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* const top_diff = top[0]->gpu_diff();
Dtype* const bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* const bottom_data = bottom[0]->gpu_data();
int count = bottom[0]->count();
round_kernel_backward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >
(count, top_diff, bottom_data, bottom_diff, zero_one);
//LOG(INFO) << "1";
CUDA_POST_KERNEL_CHECK;
Dtype sum_ratio;
//LOG(INFO) << weight_;
caffe_gpu_asum(top[0]->count(), top[0]->gpu_data(), &sum_ratio);
this->blobs_[0]->mutable_cpu_data()[0] = sum_ratio / top[0]->count();
if (sum_ratio>ratio_*top[0]->count())
{
caffe_gpu_add_scalar(bottom[0]->count(), scale, bottom[0]->mutable_gpu_diff());
}
//LOG(INFO) << "backward";
}
INSTANTIATE_LAYER_GPU_FUNCS(RoundLayer);
} // namespace caffe
|
bb63475eba1b11e833a03dae2d9138eac1e37bc2.hip
|
// !!! This is a file automatically generated by hipify!!!
// nvcc 036 sgemm.cu -lcublas
#include <stdio.h>
#include "rocblas.h"
#define IDX2C(i,j,ld) (((j)*( ld ))+( i ))
#define m 6 // number of rows of a and c
#define n 4 // number of columns of b and c
#define k 5 // number of columns of a / rows of b
int main(void) {
hipblasHandle_t handle; // CUBLAS context
int i, j; // i-row index ,j- column index
float* a; // mxk matrix
float* b; // kxn matrix
float* c; // mxn matrix
// unified memory for a,b,c
hipMallocManaged(&a, m*k * sizeof(float));
hipMallocManaged(&b, k*n * sizeof(float));
hipMallocManaged(&c, m*n * sizeof(float));
// define an mxk matrix a column by column
int ind = 11; // a:
for (j = 0;j < k;j++) { // 11 ,17 ,23 ,29 ,35
for (i = 0;i < m;i++) { // 12 ,18 ,24 ,30 ,36
a[IDX2C(i, j, m)] = (float)ind++; // 13 ,19 ,25 ,31 ,37
} // 14 ,20 ,26 ,32 ,38
} // 15 ,21 ,27 ,33 ,39
// 16 ,22 ,28 ,34 ,40
// print a row by row
printf("a:\n");
for (i = 0;i < m;i++) {
for (j = 0;j < k;j++) {
printf(" %5.0f", a[IDX2C(i, j, m)]);
}
printf("\n");
}
// define a kxn matrix b column by column
ind = 11; // b:
for (j = 0;j < n;j++) { // 11 ,16 ,21 ,26
for (i = 0;i < k;i++) { // 12 ,17 ,22 ,27
b[IDX2C(i, j, k)] = (float)ind++; // 13 ,18 ,23 ,28
} // 14 ,19 ,24 ,29
} // 15 ,20 ,25 ,30
// print b row by row
printf("b:\n");
for (i = 0;i < k;i++) {
for (j = 0;j < n;j++) {
printf(" %5.0f", b[IDX2C(i, j, k)]);
}
printf("\n");
}
// define an mxn matrix c column by column
ind = 11; // c:
for (j = 0;j < n;j++) { // 11 ,17 ,23 ,29
for (i = 0;i < m;i++) { // 12 ,18 ,24 ,30
c[IDX2C(i, j, m)] = (float)ind++; // 13 ,19 ,25 ,31
} // 14 ,20 ,26 ,32
} // 15 ,21 ,27 ,33
// 16 ,22 ,28 ,34
// print c row by row
printf("c:\n");
for (i = 0;i < m;i++) {
for (j = 0;j < n;j++) {
printf(" %5.0f", c[IDX2C(i, j, m)]);
}
printf("\n");
}
hipblasCreate(&handle); // initialize CUBLAS context
float al = 1.0f; // al =1
float bet = 1.0f; // bet =1
// matrix - matrix multiplication : c = al*a*b + bet *c
// a -mxk matrix , b -kxn matrix , c -mxn matrix ;
// al ,bet -scalars
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, &al, a, m, b, k,
&bet, c, m);
hipDeviceSynchronize();
printf("c after Sgemm :\n");
for (i = 0;i < m;i++) {
for (j = 0;j < n;j++) {
printf(" %7.0f", c[IDX2C(i, j, m)]); // print c after Sgemm
}
printf("\n");
}
hipFree(a); // free memory
hipFree(b); // free memory
hipFree(c); // free memory
hipblasDestroy(handle); // destroy CUBLAS context
return EXIT_SUCCESS;
}
|
bb63475eba1b11e833a03dae2d9138eac1e37bc2.cu
|
// nvcc 036 sgemm.cu -lcublas
#include <stdio.h>
#include "cublas_v2.h"
#define IDX2C(i,j,ld) (((j)*( ld ))+( i ))
#define m 6 // number of rows of a and c
#define n 4 // number of columns of b and c
#define k 5 // number of columns of a / rows of b
int main(void) {
cublasHandle_t handle; // CUBLAS context
int i, j; // i-row index ,j- column index
float* a; // mxk matrix
float* b; // kxn matrix
float* c; // mxn matrix
// unified memory for a,b,c
cudaMallocManaged(&a, m*k * sizeof(float));
cudaMallocManaged(&b, k*n * sizeof(float));
cudaMallocManaged(&c, m*n * sizeof(float));
// define an mxk matrix a column by column
int ind = 11; // a:
for (j = 0;j < k;j++) { // 11 ,17 ,23 ,29 ,35
for (i = 0;i < m;i++) { // 12 ,18 ,24 ,30 ,36
a[IDX2C(i, j, m)] = (float)ind++; // 13 ,19 ,25 ,31 ,37
} // 14 ,20 ,26 ,32 ,38
} // 15 ,21 ,27 ,33 ,39
// 16 ,22 ,28 ,34 ,40
// print a row by row
printf("a:\n");
for (i = 0;i < m;i++) {
for (j = 0;j < k;j++) {
printf(" %5.0f", a[IDX2C(i, j, m)]);
}
printf("\n");
}
// define a kxn matrix b column by column
ind = 11; // b:
for (j = 0;j < n;j++) { // 11 ,16 ,21 ,26
for (i = 0;i < k;i++) { // 12 ,17 ,22 ,27
b[IDX2C(i, j, k)] = (float)ind++; // 13 ,18 ,23 ,28
} // 14 ,19 ,24 ,29
} // 15 ,20 ,25 ,30
// print b row by row
printf("b:\n");
for (i = 0;i < k;i++) {
for (j = 0;j < n;j++) {
printf(" %5.0f", b[IDX2C(i, j, k)]);
}
printf("\n");
}
// define an mxn matrix c column by column
ind = 11; // c:
for (j = 0;j < n;j++) { // 11 ,17 ,23 ,29
for (i = 0;i < m;i++) { // 12 ,18 ,24 ,30
c[IDX2C(i, j, m)] = (float)ind++; // 13 ,19 ,25 ,31
} // 14 ,20 ,26 ,32
} // 15 ,21 ,27 ,33
// 16 ,22 ,28 ,34
// print c row by row
printf("c:\n");
for (i = 0;i < m;i++) {
for (j = 0;j < n;j++) {
printf(" %5.0f", c[IDX2C(i, j, m)]);
}
printf("\n");
}
cublasCreate(&handle); // initialize CUBLAS context
float al = 1.0f; // al =1
float bet = 1.0f; // bet =1
// matrix - matrix multiplication : c = al*a*b + bet *c
// a -mxk matrix , b -kxn matrix , c -mxn matrix ;
// al ,bet -scalars
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &al, a, m, b, k,
&bet, c, m);
cudaDeviceSynchronize();
printf("c after Sgemm :\n");
for (i = 0;i < m;i++) {
for (j = 0;j < n;j++) {
printf(" %7.0f", c[IDX2C(i, j, m)]); // print c after Sgemm
}
printf("\n");
}
cudaFree(a); // free memory
cudaFree(b); // free memory
cudaFree(c); // free memory
cublasDestroy(handle); // destroy CUBLAS context
return EXIT_SUCCESS;
}
|
00fca0216f15e0db6529781fc5ce5ce6e91fb299.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "singlebfs.h"
hipDeviceProp_t prop0;
// clustercomm.c LIB FUNCTION MIN 3rd ARG changed to be communicated
void FALCmpiinit(int argc,char **argv){
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD, &FALCrank);
MPI_Comm_size(MPI_COMM_WORLD, &FALCsize);
gethostname(FALChostname,255);
FALCsendbuff=(struct FALCbuffer *)malloc(sizeof(struct FALCbuffer )*FALCsize);
FALCrecvbuff=(struct FALCbuffer *)malloc(sizeof(struct FALCbuffer )*FALCsize);
FALCsendsize=(int *)malloc(sizeof(int)*FALCsize);
FALCrecvsize=(int *)malloc(sizeof(int)*FALCsize);
for(int i=0;i<FALCsize;i++){
FALCsendsize[i]=FALCrecvsize[i]=0;}
FALCstatus=(MPI_Status *)malloc(sizeof(MPI_Status)*FALCsize);
FALCrequest=(MPI_Request *)malloc(sizeof(MPI_Request)*FALCsize);
}
__device__ int changed =0;
;
int hchanged ;
__global__ void reset ( GGraph graph ,int FALCX)
{
/* 0 xx*/int id= blockIdx.x * blockDim.x + threadIdx.x+FALCX;
if( id < graph.npoints){
((struct struct_hgraph *)(graph.extra))->dist[id]=1234567890;
}//end fun 0
}
__global__ void BFS ( GGraph graph ,int FALCX)
{
/* 0 xx*/int id= blockIdx.x * blockDim.x + threadIdx.x+FALCX;
if( id < graph.npoints){
int ch ;
int falcft0=graph.index[id+1]-graph.index[id];
int falcft1=graph.index[id];
/*XX*/for(int falcft2=0;falcft2<falcft0;falcft2++){
int ut0=2*(falcft1+falcft2);
int ut1=graph.edges[ut0].ipe;
int ut2=graph.edges[ut0+1].ipe;
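// GMIN is a Falcon runtime macro: presumably an atomic relaxation dist[ut1] = min(dist[ut1], dist[id]+1) that raises the global 'changed' flag when an update occurs.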
GMIN(&(((struct struct_hgraph *)(graph.extra))->dist/*xx*/[ut1]),((struct struct_hgraph *)(graph.extra))->dist[id]+1,/*xx*/changed);//rhs not null
}//foreach
}//end fun 0
}
int main ( int argc ,char * argv [ ] )
{FALCmpiinit(argc,argv);
sprintf(partitionfile,"%s",argv[2]);
/*s1 0 0*/HGraph hgraph ;
hgraph.readPointsN(partitionfile,FALCsize);
hgraph.makeNPartitionsMPI(argv[1],FALCrank,FALCsize);
int hosthgraph=0;
alloc_extra_hgraph(hgraph,hosthgraph,hgraph.npoints);
FALCallocbuff(FALCsendbuff,FALCsize,hgraph.remotepoints);
FALCallocbuff(FALCrecvbuff,FALCsize,hgraph.npoints);
;
GGraph graph;
/*TE=1*///GPU ASS
hgraph.cloneGPU(graph,0 );
int graphflag=0;
alloc_extra_graph(graph,graphflag,graph.npoints);
int TPB0=findthreadsperblock(&prop0);
int graphpointkernelblocks=findblocksize(graph,graph.npoints,TPB0);
int graphedgekernelblocks=findblocksize(graph,graph.nedges,TPB0);
copygraphcurrentsize(graph);
hipSetDevice(0);
//val=1
hipSetDevice(0);
for(int kk=0;kk<graph.npoints;kk+=graphpointkernelblocks*TPB0){ hipLaunchKernelGGL((
reset), dim3(graphpointkernelblocks),dim3(TPB0), 0, 0, graph,kk);
}
hipDeviceSynchronize();
hipSetDevice(0);
hipDeviceSynchronize();
hipSetDevice(0);
/*TE=1*///GPU ASS
int falcvt1;
falcvt1=0;
struct struct_hgraph temp1;
hipMemcpy(&temp1,((struct struct_hgraph *)(graph.extra)),sizeof(struct struct_hgraph ),hipMemcpyDeviceToHost);
if(hipMemcpy(&(temp1.dist[0]),&(falcvt1),sizeof(int ),hipMemcpyHostToDevice)!=hipSuccess)printf("memcpyerror 1");
while(1) {
/*TE=1*///GPU ASS
int falcvt2;
falcvt2=0;
if(hipMemcpyToSymbol(changed,&(falcvt2),sizeof(int ),0,hipMemcpyHostToDevice)!=hipSuccess)printf("memcpyerror 2");//val=1
hipSetDevice(0);
for(int kk=0;kk<graph.npoints;kk+=graphpointkernelblocks*TPB0){ hipLaunchKernelGGL((
BFS), dim3(graphpointkernelblocks),dim3(TPB0), 0, 0, graph,kk);
}
hipDeviceSynchronize();
hipSetDevice(0);
hipDeviceSynchronize();
hipSetDevice(0);
for(int kk=1;kk<FALCsize;kk++){
#pragma omp parallel for num_threads(32)
for(int i=graph.offset[kk-1];i<graph.offset[kk];i++){
sendbuff(i,graph,FALCsendsize,FALCsendbuff,kk-1);
}
}
for(int i=0;i<FALCsize;i++){
if(i<FALCrank)
MPI_Isend((FALCsendbuff[i].vid), FALCsendsize[i], MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i]);
if(i>FALCrank)
MPI_Isend((FALCsendbuff[i-1].vid), FALCsendsize[i-1], MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i-1]);
}
for(int i=0;i<FALCsize;i++){
if(i<FALCrank)
MPI_Recv(FALCrecvbuff[i].vid,graph.npoints, MPI_INT,i, FALCmsgno, MPI_COMM_WORLD,MPI_STATUS_IGNORE);
if(i>FALCrank)
MPI_Recv(FALCrecvbuff[i-1].vid,graph.npoints, MPI_INT,i, FALCmsgno, MPI_COMM_WORLD,MPI_STATUS_IGNORE);
}
FALCmsgno++;
for(int i=0;i<FALCsize;i++){
if(i<FALCrank)
MPI_Isend((FALCsendbuff[i].dist), FALCsendsize[i], MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i]);
if(i>FALCrank)
MPI_Isend((FALCsendbuff[i-1].dist), FALCsendsize[i-1], MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i-1]);
}
for(int i=0;i<FALCsize;i++){
if(i<FALCrank)
MPI_Recv(FALCrecvbuff[i].dist,graph.npoints, MPI_INT,i, FALCmsgno, MPI_COMM_WORLD,&FALCstatus[i]);
if(i>FALCrank)
MPI_Recv(FALCrecvbuff[i-1].dist,graph.npoints, MPI_INT,i, FALCmsgno, MPI_COMM_WORLD,&FALCstatus[i-1]);
}
//changed should be synchronized as it is a global var
FALCmsgno++;
if(FALCrank!=0)for(int i=1;i< FALCsize;i++)MPI_Isend(&changed,1,MPI_INT,0,FALCmsgno,MPI_COMM_WORLD,&FALCrequest[i-1]);
if(FALCrank==0){
int tempchanged=0;
for(int i=1;i<FALCsize;i++){
MPI_Recv(&tempchanged,1,MPI_INT,i,FALCmsgno,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
changed+=tempchanged;
}
FALCmsgno++;
for(int i=1;i< FALCsize;i++)MPI_Isend(&changed,1,MPI_INT,i,FALCmsgno,MPI_COMM_WORLD,&FALCrequest[i-1]);
}
else {
FALCmsgno++;
MPI_Recv(&changed,1,MPI_INT,0,FALCmsgno,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
}
for(int kk=0;kk<(FALCsize-1);kk++){
MPI_Get_count(&FALCstatus[kk], MPI_INT, &FALCnamount);
#pragma omp parallel for num_threads(32)
for(int i=0;i<FALCnamount;i++){
int vertex= FALCrecvbuff[kk].vid[i];
if( ( ( struct struct_hgraph * )(graph.extra))->dist[vertex] > FALCrecvbuff[kk].dist[i])
( ( struct struct_hgraph * )(graph.extra))->dist[vertex] = FALCrecvbuff[kk].dist[i];
}
}
//here only the master node of a point has the updated value; syncing it across all nodes is still needed (future work)
for(int i=0;i<FALCsize;i++)FALCsendsize[i]=0;
/*TE=2*///GPU ASS
//val=2
//Dtype -1 -1=
if(hipMemcpyFromSymbol(&(hchanged),changed,sizeof(int ),0,hipMemcpyDeviceToHost)!=hipSuccess)printf("memcpyerror 3");
if( hchanged==0 )break;
}//end
/*TE=2*///GPU ASS
//val=2
struct struct_hgraph temp2;/*xx*/
hipSetDevice(0);
hipMemcpy(&temp2,((struct struct_hgraph *)(graph.extra)),sizeof(struct struct_hgraph ),hipMemcpyDeviceToHost);
if(hipMemcpy((((struct struct_hgraph *)(hgraph.extra))->dist),(temp2.dist),sizeof(int)*hgraph.npoints,hipMemcpyDeviceToHost)!=hipSuccess)printf("memcpyerror 4");
for (int i =0;i<hgraph.npoints;i++)printf("%d\n",/*xx*//***/((struct struct_hgraph *)(hgraph.extra))->dist[i]);
MPI_Finalize();
return 0;
}//end fun 0
|
00fca0216f15e0db6529781fc5ce5ce6e91fb299.cu
|
#include "singlebfs.h"
cudaDeviceProp prop0;
// clustercomm.c LIB FUNCTION MIN 3rd ARG changed to be communicated
void FALCmpiinit(int argc,char **argv){
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD, &FALCrank);
MPI_Comm_size(MPI_COMM_WORLD, &FALCsize);
gethostname(FALChostname,255);
FALCsendbuff=(struct FALCbuffer *)malloc(sizeof(struct FALCbuffer )*FALCsize);
FALCrecvbuff=(struct FALCbuffer *)malloc(sizeof(struct FALCbuffer )*FALCsize);
FALCsendsize=(int *)malloc(sizeof(int)*FALCsize);
FALCrecvsize=(int *)malloc(sizeof(int)*FALCsize);
for(int i=0;i<FALCsize;i++){
FALCsendsize[i]=FALCrecvsize[i]=0;}
FALCstatus=(MPI_Status *)malloc(sizeof(MPI_Status)*FALCsize);
FALCrequest=(MPI_Request *)malloc(sizeof(MPI_Request)*FALCsize);
}
__device__ int changed =0;
;
int hchanged ;
__global__ void reset ( GGraph graph ,int FALCX)
{
/* 0 xx*/int id= blockIdx.x * blockDim.x + threadIdx.x+FALCX;
if( id < graph.npoints){
((struct struct_hgraph *)(graph.extra))->dist[id]=1234567890;
}//end fun 0
}
__global__ void BFS ( GGraph graph ,int FALCX)
{
/* 0 xx*/int id= blockIdx.x * blockDim.x + threadIdx.x+FALCX;
if( id < graph.npoints){
int ch ;
int falcft0=graph.index[id+1]-graph.index[id];
int falcft1=graph.index[id];
/*XX*/for(int falcft2=0;falcft2<falcft0;falcft2++){
int ut0=2*(falcft1+falcft2);
int ut1=graph.edges[ut0].ipe;
int ut2=graph.edges[ut0+1].ipe;
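// GMIN is a Falcon runtime macro: presumably an atomic relaxation dist[ut1] = min(dist[ut1], dist[id]+1) that raises the global 'changed' flag when an update occurs.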
GMIN(&(((struct struct_hgraph *)(graph.extra))->dist/*xx*/[ut1]),((struct struct_hgraph *)(graph.extra))->dist[id]+1,/*xx*/changed);//rhs not null
}//foreach
}//end fun 0
}
int main ( int argc ,char * argv [ ] )
{FALCmpiinit(argc,argv);
sprintf(partitionfile,"%s",argv[2]);
/*s1 0 0*/HGraph hgraph ;
hgraph.readPointsN(partitionfile,FALCsize);
hgraph.makeNPartitionsMPI(argv[1],FALCrank,FALCsize);
int hosthgraph=0;
alloc_extra_hgraph(hgraph,hosthgraph,hgraph.npoints);
FALCallocbuff(FALCsendbuff,FALCsize,hgraph.remotepoints);
FALCallocbuff(FALCrecvbuff,FALCsize,hgraph.npoints);
;
GGraph graph;
/*TE=1*///GPU ASS
hgraph.cloneGPU(graph,0 );
int graphflag=0;
alloc_extra_graph(graph,graphflag,graph.npoints);
int TPB0=findthreadsperblock(&prop0);
int graphpointkernelblocks=findblocksize(graph,graph.npoints,TPB0);
int graphedgekernelblocks=findblocksize(graph,graph.nedges,TPB0);
copygraphcurrentsize(graph);
cudaSetDevice(0);
//val=1
cudaSetDevice(0);
for(int kk=0;kk<graph.npoints;kk+=graphpointkernelblocks*TPB0){
reset<<<graphpointkernelblocks,TPB0>>>(graph,kk);
}
cudaDeviceSynchronize();
cudaSetDevice(0);
cudaDeviceSynchronize();
cudaSetDevice(0);
/*TE=1*///GPU ASS
int falcvt1;
falcvt1=0;
struct struct_hgraph temp1;
cudaMemcpy(&temp1,((struct struct_hgraph *)(graph.extra)),sizeof(struct struct_hgraph ),cudaMemcpyDeviceToHost);
if(cudaMemcpy(&(temp1.dist[0]),&(falcvt1),sizeof(int ),cudaMemcpyHostToDevice)!=cudaSuccess)printf("memcpyerror 1");
while(1) {
/*TE=1*///GPU ASS
int falcvt2;
falcvt2=0;
if(cudaMemcpyToSymbol(changed,&(falcvt2),sizeof(int ),0,cudaMemcpyHostToDevice)!=cudaSuccess)printf("memcpyerror 2");//val=1
cudaSetDevice(0);
for(int kk=0;kk<graph.npoints;kk+=graphpointkernelblocks*TPB0){
BFS<<<graphpointkernelblocks,TPB0>>>(graph,kk);
}
cudaDeviceSynchronize();
cudaSetDevice(0);
cudaDeviceSynchronize();
cudaSetDevice(0);
for(int kk=1;kk<FALCsize;kk++){
#pragma omp parallel for num_threads(32)
for(int i=graph.offset[kk-1];i<graph.offset[kk];i++){
sendbuff(i,graph,FALCsendsize,FALCsendbuff,kk-1);
}
}
for(int i=0;i<FALCsize;i++){
if(i<FALCrank)
MPI_Isend((FALCsendbuff[i].vid), FALCsendsize[i], MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i]);
if(i>FALCrank)
MPI_Isend((FALCsendbuff[i-1].vid), FALCsendsize[i-1], MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i-1]);
}
for(int i=0;i<FALCsize;i++){
if(i<FALCrank)
MPI_Recv(FALCrecvbuff[i].vid,graph.npoints, MPI_INT,i, FALCmsgno, MPI_COMM_WORLD,MPI_STATUS_IGNORE);
if(i>FALCrank)
MPI_Recv(FALCrecvbuff[i-1].vid,graph.npoints, MPI_INT,i, FALCmsgno, MPI_COMM_WORLD,MPI_STATUS_IGNORE);
}
FALCmsgno++;
for(int i=0;i<FALCsize;i++){
if(i<FALCrank)
MPI_Isend((FALCsendbuff[i].dist), FALCsendsize[i], MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i]);
if(i>FALCrank)
MPI_Isend((FALCsendbuff[i-1].dist), FALCsendsize[i-1], MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i-1]);
}
for(int i=0;i<FALCsize;i++){
if(i<FALCrank)
MPI_Recv(FALCrecvbuff[i].dist,graph.npoints, MPI_INT,i, FALCmsgno, MPI_COMM_WORLD,&FALCstatus[i]);
if(i>FALCrank)
MPI_Recv(FALCrecvbuff[i-1].dist,graph.npoints, MPI_INT,i, FALCmsgno, MPI_COMM_WORLD,&FALCstatus[i-1]);
}
//changed should be synchronized as it is a global var
FALCmsgno++;
if(FALCrank!=0)for(int i=1;i< FALCsize;i++)MPI_Isend(&changed,1,MPI_INT,0,FALCmsgno,MPI_COMM_WORLD,&FALCrequest[i-1]);
if(FALCrank==0){
int tempchanged=0;
for(int i=1;i<FALCsize;i++){
MPI_Recv(&tempchanged,1,MPI_INT,i,FALCmsgno,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
changed+=tempchanged;
}
FALCmsgno++;
for(int i=1;i< FALCsize;i++)MPI_Isend(&changed,1,MPI_INT,i,FALCmsgno,MPI_COMM_WORLD,&FALCrequest[i-1]);
}
else {
FALCmsgno++;
MPI_Recv(&changed,1,MPI_INT,0,FALCmsgno,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
}
for(int kk=0;kk<(FALCsize-1);kk++){
MPI_Get_count(&FALCstatus[kk], MPI_INT, &FALCnamount);
#pragma omp parallel for num_threads(32)
for(int i=0;i<FALCnamount;i++){
int vertex= FALCrecvbuff[kk].vid[i];
if( ( ( struct struct_hgraph * )(graph.extra))->dist[vertex] > FALCrecvbuff[kk].dist[i])
( ( struct struct_hgraph * )(graph.extra))->dist[vertex] = FALCrecvbuff[kk].dist[i];
}
}
//here only the master node of a point has the updated value; syncing it across all nodes is still needed (future work)
for(int i=0;i<FALCsize;i++)FALCsendsize[i]=0;
/*TE=2*///GPU ASS
//val=2
//Dtype -1 -1=
if(cudaMemcpyFromSymbol(&(hchanged),changed,sizeof(int ),0,cudaMemcpyDeviceToHost)!=cudaSuccess)printf("memcpyerror 3");
if( hchanged==0 )break;
}//end
/*TE=2*///GPU ASS
//val=2
struct struct_hgraph temp2;/*xx*/
cudaSetDevice(0);
cudaMemcpy(&temp2,((struct struct_hgraph *)(graph.extra)),sizeof(struct struct_hgraph ),cudaMemcpyDeviceToHost);
if(cudaMemcpy((((struct struct_hgraph *)(hgraph.extra))->dist),(temp2.dist),sizeof(int)*hgraph.npoints,cudaMemcpyDeviceToHost)!=cudaSuccess)printf("memcpyerror 4");
for (int i =0;i<hgraph.npoints;i++)printf("%d\n",/*xx*//***/((struct struct_hgraph *)(hgraph.extra))->dist[i]);
MPI_Finalize();
return 0;
}//end fun 0
|
a21103c7aa685b0f62bd75ad7727e4c3e08f47a6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void gpu_Actualizar(float *layer, int posicion, float energia,int layer_size) {
float umbral = 0.001;
int gid = (blockIdx.x + gridDim.x * blockIdx.y) * (blockDim.x * blockDim.y) + (threadIdx.x + blockDim.x * threadIdx.y);
if(gid < layer_size){
int distancia = posicion - gid;
if ( distancia < 0 ) distancia = - distancia;
distancia = distancia + 1;
float atenuacion = sqrtf( (float)distancia );
float energia_k = energia / atenuacion;
if ( energia_k >= umbral || energia_k <= -umbral ) layer[gid] = layer[gid] + energia_k;
}
}
__global__ void gpu_Copiar(float *layer, float *layer_copy,int layer_size) {
int gid = (blockIdx.x + gridDim.x * blockIdx.y) * (blockDim.x * blockDim.y) + (threadIdx.x + blockDim.x * threadIdx.y);
if(gid < layer_size) layer_copy[gid]=layer[gid];
}
__global__ void gpu_Relajacion(float *layer, float *layer_copy, int layer_size) {
int gid = (blockIdx.x + gridDim.x * blockIdx.y) * (blockDim.x * blockDim.y) + (threadIdx.x + blockDim.x * threadIdx.y);
if(gid>0 && gid < layer_size-1) layer[gid] = ( layer_copy[gid-1] + layer_copy[gid] + layer_copy[gid+1] ) / 3;
}
__global__ void gpu_reduceMaximo(float* g_candidatos, float* positions, int size){
int gid = (blockIdx.x + gridDim.x * blockIdx.y) * (blockDim.x * blockDim.y) + (threadIdx.x + blockDim.x * threadIdx.y);
int s = size/2;
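// Single folding step of a max-reduction: each thread in the lower half keeps the larger of its candidate and the one half the array away, carrying the position along.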
if ( gid >= size/2) return;
if(g_candidatos[ gid ] < g_candidatos[ gid + s]) {
g_candidatos[ gid ] = g_candidatos[ s + gid ];
positions[gid] = positions[gid+s];
}
// Extra element
if ( size%2 != 0 && gid == 0 ){
if(g_candidatos[ 0 ] < g_candidatos[ size-1 ]) {
g_candidatos[ 0 ] = g_candidatos[ size-1 ];
positions[0] = size-1;
}
}
}
__global__ void gpu_obtenCandidatos (float *layer, float *candidatos, int layer_size ){
int gid = (blockIdx.x + gridDim.x * blockIdx.y) * (blockDim.x * blockDim.y) + (threadIdx.x + blockDim.x * threadIdx.y);
if (gid >= layer_size) return;
candidatos[gid] = 0;
if (gid == 0 || gid == layer_size-1) return;
if (layer[gid]>layer[gid-1] && layer[gid] > layer[gid+1]) candidatos[gid] = layer[gid];
}
|
a21103c7aa685b0f62bd75ad7727e4c3e08f47a6.cu
|
__global__ void gpu_Actualizar(float *layer, int posicion, float energia,int layer_size) {
float umbral = 0.001;
int gid = (blockIdx.x + gridDim.x * blockIdx.y) * (blockDim.x * blockDim.y) + (threadIdx.x + blockDim.x * threadIdx.y);
if(gid < layer_size){
int distancia = posicion - gid;
if ( distancia < 0 ) distancia = - distancia;
distancia = distancia + 1;
float atenuacion = sqrtf( (float)distancia );
float energia_k = energia / atenuacion;
if ( energia_k >= umbral || energia_k <= -umbral ) layer[gid] = layer[gid] + energia_k;
}
}
__global__ void gpu_Copiar(float *layer, float *layer_copy,int layer_size) {
int gid = (blockIdx.x + gridDim.x * blockIdx.y) * (blockDim.x * blockDim.y) + (threadIdx.x + blockDim.x * threadIdx.y);
if(gid < layer_size) layer_copy[gid]=layer[gid];
}
__global__ void gpu_Relajacion(float *layer, float *layer_copy, int layer_size) {
int gid = (blockIdx.x + gridDim.x * blockIdx.y) * (blockDim.x * blockDim.y) + (threadIdx.x + blockDim.x * threadIdx.y);
if(gid>0 && gid < layer_size-1) layer[gid] = ( layer_copy[gid-1] + layer_copy[gid] + layer_copy[gid+1] ) / 3;
}
__global__ void gpu_reduceMaximo(float* g_candidatos, float* positions, int size){
int gid = (blockIdx.x + gridDim.x * blockIdx.y) * (blockDim.x * blockDim.y) + (threadIdx.x + blockDim.x * threadIdx.y);
int s = size/2;
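// Single folding step of a max-reduction: each thread in the lower half keeps the larger of its candidate and the one half the array away, carrying the position along.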
if ( gid >= size/2) return;
if(g_candidatos[ gid ] < g_candidatos[ gid + s]) {
g_candidatos[ gid ] = g_candidatos[ s + gid ];
positions[gid] = positions[gid+s];
}
// Extra element
if ( size%2 != 0 && gid == 0 ){
if(g_candidatos[ 0 ] < g_candidatos[ size-1 ]) {
g_candidatos[ 0 ] = g_candidatos[ size-1 ];
positions[0] = size-1;
}
}
}
__global__ void gpu_obtenCandidatos (float *layer, float *candidatos, int layer_size ){
int gid = (blockIdx.x + gridDim.x * blockIdx.y) * (blockDim.x * blockDim.y) + (threadIdx.x + blockDim.x * threadIdx.y);
if (gid >= layer_size) return;
candidatos[gid] = 0;
if (gid == 0 || gid == layer_size-1) return;
if (layer[gid]>layer[gid-1] && layer[gid] > layer[gid+1]) candidatos[gid] = layer[gid];
}
|
834c9a09a6046372c7e9017a6c10b31bfc8409e6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
AIJCUSPARSE methods implemented with Cuda kernels. Uses cuSparse/Thrust maps from AIJCUSPARSE
*/
#define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1
#include <petscconf.h>
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/impls/sbaij/seq/sbaij.h>
#undef VecType
#include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h>
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ > 600 && PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
#define AIJBANDUSEGROUPS 1
#endif
#if defined(AIJBANDUSEGROUPS)
#include <hip/hip_cooperative_groups.h>
#endif
/*
LU BAND factorization with optimization for block diagonal (Nf blocks) in natural order (-mat_no_inode -pc_factor_mat_ordering_type rcm with Nf>1 fields)
requires:
structurally symmetric: fix with transpose/column meta data
*/
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSEBAND(Mat, Mat, IS, IS, const MatFactorInfo *);
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSEBAND(Mat, Mat, const MatFactorInfo *);
/*
The GPU LU factor kernel
*/
__global__ void __launch_bounds__(1024, 1) mat_lu_factor_band_init_set_i(const PetscInt n, const int bw, int bi_csr[])
{
const PetscInt Nf = gridDim.x, Nblk = gridDim.y, nloc = n / Nf;
const PetscInt field = blockIdx.x, blkIdx = blockIdx.y;
const PetscInt nloc_i = (nloc / Nblk + !!(nloc % Nblk)), start_i = field * nloc + blkIdx * nloc_i, end_i = (start_i + nloc_i) > (field + 1) * nloc ? (field + 1) * nloc : (start_i + nloc_i);
// set i (row+1)
if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0) bi_csr[0] = 0; // dummy at zero
for (int rowb = start_i + threadIdx.y; rowb < end_i; rowb += blockDim.y) { // rows in block by thread y
if (rowb < end_i && threadIdx.x == 0) {
PetscInt i = rowb + 1, ni = (rowb > bw) ? bw + 1 : i, n1L = ni * (ni - 1) / 2, nug = i * bw, n2L = bw * ((rowb > bw) ? (rowb - bw) : 0), mi = bw + rowb + 1 - n, clip = (mi > 0) ? mi * (mi - 1) / 2 + mi : 0;
bi_csr[rowb + 1] = n1L + nug - clip + n2L + i;
}
}
}
// copy AIJ to AIJ_BAND
__global__ void __launch_bounds__(1024, 1) mat_lu_factor_band_copy_aij_aij(const PetscInt n, const int bw, const PetscInt r[], const PetscInt ic[], const int ai_d[], const int aj_d[], const PetscScalar aa_d[], const int bi_csr[], PetscScalar ba_csr[])
{
const PetscInt Nf = gridDim.x, Nblk = gridDim.y, nloc = n / Nf;
const PetscInt field = blockIdx.x, blkIdx = blockIdx.y;
const PetscInt nloc_i = (nloc / Nblk + !!(nloc % Nblk)), start_i = field * nloc + blkIdx * nloc_i, end_i = (start_i + nloc_i) > (field + 1) * nloc ? (field + 1) * nloc : (start_i + nloc_i);
// zero B
if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0) ba_csr[bi_csr[n]] = 0; // flop count at end
for (int rowb = start_i + threadIdx.y; rowb < end_i; rowb += blockDim.y) { // rows in block by thread y
if (rowb < end_i) {
PetscScalar *batmp = ba_csr + bi_csr[rowb];
const PetscInt nzb = bi_csr[rowb + 1] - bi_csr[rowb];
for (int j = threadIdx.x; j < nzb; j += blockDim.x) {
if (j < nzb) batmp[j] = 0;
}
}
}
// copy A into B with CSR format -- these two loops can be fused
for (int rowb = start_i + threadIdx.y; rowb < end_i; rowb += blockDim.y) { // rows in block by thread y
if (rowb < end_i) {
const PetscInt rowa = r[rowb], nza = ai_d[rowa + 1] - ai_d[rowa];
const int *ajtmp = aj_d + ai_d[rowa], bjStart = (rowb > bw) ? rowb - bw : 0;
const PetscScalar *av = aa_d + ai_d[rowa];
PetscScalar *batmp = ba_csr + bi_csr[rowb];
/* load in initial (unfactored row) */
for (int j = threadIdx.x; j < nza; j += blockDim.x) {
if (j < nza) {
PetscInt colb = ic[ajtmp[j]], idx = colb - bjStart;
PetscScalar vala = av[j];
batmp[idx] = vala;
}
}
}
}
}
// print AIJ_BAND
__global__ void print_mat_aij_band(const PetscInt n, const int bi_csr[], const PetscScalar ba_csr[])
{
// debug
if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0) {
printf("B (AIJ) n=%d:\n", (int)n);
for (int rowb = 0; rowb < n; rowb++) {
const PetscInt nz = bi_csr[rowb + 1] - bi_csr[rowb];
const PetscScalar *batmp = ba_csr + bi_csr[rowb];
for (int j = 0; j < nz; j++) printf("(%13.6e) ", PetscRealPart(batmp[j]));
printf(" bi=%d\n", bi_csr[rowb + 1]);
}
}
}
// Band LU kernel --- ba_csr bi_csr
__global__ void __launch_bounds__(1024, 1) mat_lu_factor_band(const PetscInt n, const PetscInt bw, const int bi_csr[], PetscScalar ba_csr[], int *use_group_sync)
{
const PetscInt Nf = gridDim.x, Nblk = gridDim.y, nloc = n / Nf;
const PetscInt field = blockIdx.x, blkIdx = blockIdx.y;
const PetscInt start = field * nloc, end = start + nloc;
#if defined(AIJBANDUSEGROUPS)
auto g = cooperative_groups::this_grid();
#endif
// A22 panel update for each row A(1,:) and col A(:,1)
for (int glbDD = start, locDD = 0; glbDD < end; glbDD++, locDD++) {
PetscInt tnzUd = bw, maxU = end - 1 - glbDD; // we are chopping off the inter ears
const PetscInt nzUd = (tnzUd > maxU) ? maxU : tnzUd, dOffset = (glbDD > bw) ? bw : glbDD; // global to go past ears after first
PetscScalar *pBdd = ba_csr + bi_csr[glbDD] + dOffset;
const PetscScalar *baUd = pBdd + 1; // vector of data U(i,i+1:end)
const PetscScalar Bdd = *pBdd;
const PetscInt offset = blkIdx * blockDim.y + threadIdx.y, inc = Nblk * blockDim.y;
if (threadIdx.x == 0) {
for (int idx = offset, myi = glbDD + offset + 1; idx < nzUd; idx += inc, myi += inc) { /* assuming symmetric structure */
const PetscInt bwi = myi > bw ? bw : myi, kIdx = bwi - (myi - glbDD); // cuts off just the first (global) block
PetscScalar *Aid = ba_csr + bi_csr[myi] + kIdx;
*Aid = *Aid / Bdd;
}
}
__syncthreads(); // synch on threadIdx.x only
for (int idx = offset, myi = glbDD + offset + 1; idx < nzUd; idx += inc, myi += inc) {
const PetscInt bwi = myi > bw ? bw : myi, kIdx = bwi - (myi - glbDD); // cuts off just the first (global) block
PetscScalar *Aid = ba_csr + bi_csr[myi] + kIdx;
PetscScalar *Aij = Aid + 1;
const PetscScalar Lid = *Aid;
for (int jIdx = threadIdx.x; jIdx < nzUd; jIdx += blockDim.x) Aij[jIdx] -= Lid * baUd[jIdx];
}
#if defined(AIJBANDUSEGROUPS)
if (use_group_sync) {
g.sync();
} else {
__syncthreads();
}
#else
__syncthreads();
#endif
  } /* end of for (i=0; i<n; i++) { */
}
static PetscErrorCode MatSolve_SeqAIJCUSPARSEBAND(Mat, Vec, Vec);
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSEBAND(Mat B, Mat A, const MatFactorInfo *info)
{
Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr;
PetscCheck(cusparseTriFactors, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors");
Mat_SeqAIJCUSPARSE *cusparsestructA = (Mat_SeqAIJCUSPARSE *)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstructA;
CsrMatrix *matrixA;
const PetscInt n = A->rmap->n, *ic, *r;
const int *ai_d, *aj_d;
const PetscScalar *aa_d;
PetscScalar *ba_t = cusparseTriFactors->a_band_d;
int *bi_t = cusparseTriFactors->i_band_d;
int Ni = 10, team_size = 9, Nf = 1, nVec = 56, nconcurrent = 1, nsm = -1; // Nf is batch size - not used
PetscFunctionBegin;
if (A->rmap->n == 0) PetscFunctionReturn(0);
// cusparse setup
PetscCheck(cusparsestructA, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparsestructA");
matstructA = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestructA->mat; // matstruct->cprowIndices
PetscCheck(matstructA, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Missing mat struct");
matrixA = (CsrMatrix *)matstructA->mat;
PetscCheck(matrixA, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Missing matrix cusparsestructA->mat->mat");
// get data
ic = thrust::raw_pointer_cast(cusparseTriFactors->cpermIndices->data());
ai_d = thrust::raw_pointer_cast(matrixA->row_offsets->data());
aj_d = thrust::raw_pointer_cast(matrixA->column_indices->data());
aa_d = thrust::raw_pointer_cast(matrixA->values->data().get());
r = thrust::raw_pointer_cast(cusparseTriFactors->rpermIndices->data());
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogGpuTimeBegin());
{
int bw = (int)(2. * (double)n - 1. - (double)(PetscSqrtReal(1. + 4. * ((double)n * (double)n - (double)b->nz)) + PETSC_MACHINE_EPSILON)) / 2, bm1 = bw - 1, nl = n / Nf;
#if !defined(AIJBANDUSEGROUPS)
Ni = 1 / nconcurrent;
Ni = 1;
#else
if (!cusparseTriFactors->init_dev_prop) {
int gpuid;
cusparseTriFactors->init_dev_prop = PETSC_TRUE;
hipGetDevice(&gpuid);
hipGetDeviceProperties(&cusparseTriFactors->dev_prop, gpuid);
}
nsm = cusparseTriFactors->dev_prop.multiProcessorCount;
Ni = nsm / Nf / nconcurrent;
#endif
team_size = bw / Ni + !!(bw % Ni);
nVec = PetscMin(bw, 1024 / team_size);
    PetscCall(PetscInfo(A, "Matrix Bandwidth = %d, number SMs/block = %d, num concurrency = %d, num fields = %d, numSMs/GPU = %d, thread group size = %d,%d\n", bw, Ni, nconcurrent, Nf, nsm, team_size, nVec));
{
dim3 dimBlockTeam(nVec, team_size);
dim3 dimBlockLeague(Nf, Ni);
hipLaunchKernelGGL(( mat_lu_factor_band_copy_aij_aij), dim3(dimBlockLeague), dim3(dimBlockTeam), 0, 0, n, bw, r, ic, ai_d, aj_d, aa_d, bi_t, ba_t);
PetscCUDACheckLaunch; // does a sync
#if defined(AIJBANDUSEGROUPS)
if (Ni > 1) {
void *kernelArgs[] = {(void *)&n, (void *)&bw, (void *)&bi_t, (void *)&ba_t, (void *)&nsm};
hipLaunchCooperativeKernel((void *)mat_lu_factor_band, dimBlockLeague, dimBlockTeam, kernelArgs, 0, NULL);
} else {
hipLaunchKernelGGL(( mat_lu_factor_band), dim3(dimBlockLeague), dim3(dimBlockTeam), 0, 0, n, bw, bi_t, ba_t, NULL);
}
#else
hipLaunchKernelGGL(( mat_lu_factor_band), dim3(dimBlockLeague), dim3(dimBlockTeam), 0, 0, n, bw, bi_t, ba_t, NULL);
#endif
PetscCUDACheckLaunch; // does a sync
#if defined(PETSC_USE_LOG)
PetscCall(PetscLogGpuFlops((PetscLogDouble)Nf * (bm1 * (bm1 + 1) * (PetscLogDouble)(2 * bm1 + 1) / 3 + (PetscLogDouble)2 * (nl - bw) * bw * bw + (PetscLogDouble)nl * (nl + 1) / 2)));
#endif
}
}
PetscCall(PetscLogGpuTimeEnd());
/* determine which version of MatSolve needs to be used. from MatLUFactorNumeric_AIJ_SeqAIJCUSPARSE */
B->ops->solve = MatSolve_SeqAIJCUSPARSEBAND;
B->ops->solvetranspose = NULL; // need transpose
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
PetscFunctionReturn(0);
}
PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSEBAND(Mat B, Mat A, IS isrow, IS iscol, const MatFactorInfo *info)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data, *b;
IS isicol;
const PetscInt *ic, *ai = a->i, *aj = a->j;
PetscScalar *ba_t;
int *bi_t;
PetscInt i, n = A->rmap->n, Nf = 1; // Nf batch size - not used
PetscInt nzBcsr, bwL, bwU;
PetscBool missing;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr;
PetscFunctionBegin;
PetscCheck(A->rmap->N == A->cmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "matrix must be square");
PetscCall(MatMissingDiagonal(A, &missing, &i));
PetscCheck(!missing, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Matrix is missing diagonal entry %" PetscInt_FMT, i);
PetscCheck(cusparseTriFactors, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "!cusparseTriFactors");
PetscCall(MatIsStructurallySymmetric(A, &missing));
PetscCheck(missing, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "only structurally symmetric matrices supported");
PetscCall(ISInvertPermutation(iscol, PETSC_DECIDE, &isicol));
PetscCall(ISGetIndices(isicol, &ic));
PetscCall(MatSeqAIJSetPreallocation_SeqAIJ(B, MAT_SKIP_ALLOCATION, NULL));
b = (Mat_SeqAIJ *)(B)->data;
/* get band widths, MatComputeBandwidth should take a reordering ic and do this */
bwL = bwU = 0;
for (int rwb = 0; rwb < n; rwb++) {
const PetscInt rwa = ic[rwb], anz = ai[rwb + 1] - ai[rwb], *ajtmp = aj + ai[rwb];
for (int j = 0; j < anz; j++) {
PetscInt colb = ic[ajtmp[j]];
if (colb < rwa) { // L
if (rwa - colb > bwL) bwL = rwa - colb;
} else {
if (colb - rwa > bwU) bwU = colb - rwa;
}
}
}
PetscCall(ISRestoreIndices(isicol, &ic));
  /* only structurally symmetric matrices are supported for now, though unsymmetric structure might still work */
PetscCheck(bwL == bwU, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Only symmetric structure supported (now) W_L=%" PetscInt_FMT " W_U=%" PetscInt_FMT, bwL, bwU);
PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
nzBcsr = n + (2 * n - 1) * bwU - bwU * bwU;
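  /* note: this equals n*(2*bwU+1) - 2*(bwU*(bwU+1)/2), i.e. a full band of width
     2*bwU+1 stored for every row minus the two triangular "ears" that fall outside the
     matrix at the top-left and bottom-right corners */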
b->maxnz = b->nz = nzBcsr;
cusparseTriFactors->nnz = b->nz; // only meta data needed: n & nz
PetscCall(PetscInfo(A, "Matrix Bandwidth = %" PetscInt_FMT ", nnz = %" PetscInt_FMT "\n", bwL, b->nz));
if (!cusparseTriFactors->workVector) cusparseTriFactors->workVector = new THRUSTARRAY(n);
PetscCallCUDA(hipMalloc(&ba_t, (b->nz + 1) * sizeof(PetscScalar))); // include a place for flops
PetscCallCUDA(hipMalloc(&bi_t, (n + 1) * sizeof(int)));
cusparseTriFactors->a_band_d = ba_t;
cusparseTriFactors->i_band_d = bi_t;
/* In b structure: Free imax, ilen, old a, old j. Allocate solve_work, new a, new j */
{
dim3 dimBlockTeam(1, 128);
dim3 dimBlockLeague(Nf, 1);
hipLaunchKernelGGL(( mat_lu_factor_band_init_set_i), dim3(dimBlockLeague), dim3(dimBlockTeam), 0, 0, n, bwU, bi_t);
}
PetscCUDACheckLaunch; // does a sync
// setup data
if (!cusparseTriFactors->rpermIndices) {
const PetscInt *r;
PetscCall(ISGetIndices(isrow, &r));
cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->rpermIndices->assign(r, r + n);
PetscCall(ISRestoreIndices(isrow, &r));
PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt)));
}
/* upper triangular indices */
if (!cusparseTriFactors->cpermIndices) {
const PetscInt *c;
PetscCall(ISGetIndices(isicol, &c));
cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->cpermIndices->assign(c, c + n);
PetscCall(ISRestoreIndices(isicol, &c));
PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt)));
}
/* put together the new matrix */
b->free_a = PETSC_FALSE;
b->free_ij = PETSC_FALSE;
b->singlemalloc = PETSC_FALSE;
b->ilen = NULL;
b->imax = NULL;
b->row = isrow;
b->col = iscol;
PetscCall(PetscObjectReference((PetscObject)isrow));
PetscCall(PetscObjectReference((PetscObject)iscol));
b->icol = isicol;
PetscCall(PetscMalloc1(n + 1, &b->solve_work));
B->factortype = MAT_FACTOR_LU;
B->info.factor_mallocs = 0;
B->info.fill_ratio_given = 0;
if (ai[n]) {
B->info.fill_ratio_needed = ((PetscReal)(nzBcsr)) / ((PetscReal)ai[n]);
} else {
B->info.fill_ratio_needed = 0.0;
}
#if defined(PETSC_USE_INFO)
if (ai[n] != 0) {
PetscReal af = B->info.fill_ratio_needed;
PetscCall(PetscInfo(A, "Band fill ratio %g\n", (double)af));
} else {
PetscCall(PetscInfo(A, "Empty matrix\n"));
}
#endif
if (a->inode.size) PetscCall(PetscInfo(A, "Warning: using inodes in band solver.\n"));
PetscCall(MatSeqAIJCheckInode_FactorLU(B));
B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSEBAND;
B->offloadmask = PETSC_OFFLOAD_GPU;
PetscFunctionReturn(0);
}
/* Use -pc_factor_mat_solver_type cusparseband */
PetscErrorCode MatFactorGetSolverType_seqaij_cusparse_band(Mat A, MatSolverType *type)
{
PetscFunctionBegin;
*type = MATSOLVERCUSPARSEBAND;
PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse_band(Mat A, MatFactorType ftype, Mat *B)
{
PetscInt n = A->rmap->n;
PetscFunctionBegin;
PetscCall(MatCreate(PetscObjectComm((PetscObject)A), B));
PetscCall(MatSetSizes(*B, n, n, n, n));
(*B)->factortype = ftype;
(*B)->canuseordering = PETSC_TRUE;
PetscCall(MatSetType(*B, MATSEQAIJCUSPARSE));
if (ftype == MAT_FACTOR_LU) {
PetscCall(MatSetBlockSizesFromMats(*B, A, A));
(*B)->ops->ilufactorsymbolic = NULL; // MatILUFactorSymbolic_SeqAIJCUSPARSE;
(*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSEBAND;
PetscCall(PetscStrallocpy(MATORDERINGRCM, (char **)&(*B)->preferredordering[MAT_FACTOR_LU]));
} else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Factor type not supported for CUSPARSEBAND Matrix Types");
PetscCall(MatSeqAIJSetPreallocation(*B, MAT_SKIP_ALLOCATION, NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)(*B), "MatFactorGetSolverType_C", MatFactorGetSolverType_seqaij_cusparse_band));
PetscFunctionReturn(0);
}
#define WARP_SIZE 32
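// Warp-level sum: a __shfl_down_sync tree reduction over the WARP_SIZE lanes of a warp;
// after log2(WARP_SIZE) steps lane 0 holds the full sum (other lanes hold partial sums).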
template <typename T>
__forceinline__ __device__ T wreduce(T a)
{
T b;
#pragma unroll
for (int i = WARP_SIZE / 2; i >= 1; i = i >> 1) {
b = __shfl_down_sync(0xffffffff, a, i);
a += b;
}
return a;
}
// reduce in a block, returns result in thread 0
template <typename T, int BLOCK_SIZE>
__device__ T breduce(T a)
{
constexpr int NWARP = BLOCK_SIZE / WARP_SIZE;
__shared__ double buf[NWARP];
int wid = threadIdx.x / WARP_SIZE;
int laneid = threadIdx.x % WARP_SIZE;
T b = wreduce<T>(a);
if (laneid == 0) buf[wid] = b;
__syncthreads();
if (wid == 0) {
if (threadIdx.x < NWARP) a = buf[threadIdx.x];
else a = 0;
for (int i = (NWARP + 1) / 2; i >= 1; i = i >> 1) a += __shfl_down_sync(0xffffffff, a, i);
}
return a;
}
// Band LU kernel --- ba_csr bi_csr
template <int BLOCK_SIZE>
__global__ void __launch_bounds__(256, 1) mat_solve_band(const PetscInt n, const PetscInt bw, const PetscScalar ba_csr[], PetscScalar x[])
{
const PetscInt Nf = gridDim.x, nloc = n / Nf, field = blockIdx.x, start = field * nloc, end = start + nloc, chopnz = bw * (bw + 1) / 2, blocknz = (2 * bw + 1) * nloc, blocknz_0 = blocknz - chopnz;
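  // Layout notes (a reading of the indexing below, not part of the original comments):
  // chopnz is the size of one triangular "ear" clipped from the band at a global corner,
  // blocknz is a full-band block of nloc rows, and blocknz_0 is the first field's block
  // with its top-left ear removed; the total allocation is Nf*blocknz - 2*chopnz entries.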
const PetscScalar *pLi;
const int tid = threadIdx.x;
/* Next, solve L */
pLi = ba_csr + (field == 0 ? 0 : blocknz_0 + (field - 1) * blocknz + bw); // diagonal (0,0) in field
for (int glbDD = start, locDD = 0; glbDD < end; glbDD++, locDD++) {
const PetscInt col = locDD < bw ? start : (glbDD - bw);
PetscScalar t = 0;
for (int j = col + tid, idx = tid; j < glbDD; j += blockDim.x, idx += blockDim.x) t += pLi[idx] * x[j];
#if defined(PETSC_USE_COMPLEX)
PetscReal tr = PetscRealPartComplex(t), ti = PetscImaginaryPartComplex(t);
PetscScalar tt(breduce<PetscReal, BLOCK_SIZE>(tr), breduce<PetscReal, BLOCK_SIZE>(ti));
t = tt;
#else
t = breduce<PetscReal, BLOCK_SIZE>(t);
#endif
    if (threadIdx.x == 0) x[glbDD] -= t; // no division needed: L has a unit diagonal
__syncthreads();
// inc
pLi += glbDD - col; // get to diagonal
if (glbDD > n - 1 - bw) pLi += n - 1 - glbDD; // skip over U, only last block has funny offset
else pLi += bw;
pLi += 1; // skip to next row
if (field > 0 && (locDD + 1) < bw) pLi += bw - (locDD + 1); // skip padding at beginning (ear)
}
/* Then, solve U */
pLi = ba_csr + Nf * blocknz - 2 * chopnz - 1; // end of real data on block (diagonal)
if (field != Nf - 1) pLi -= blocknz_0 + (Nf - 2 - field) * blocknz + bw; // diagonal of last local row
for (int glbDD = end - 1, locDD = 0; glbDD >= start; glbDD--, locDD++) {
const PetscInt col = (locDD < bw) ? end - 1 : glbDD + bw; // end of row in U
PetscScalar t = 0;
for (int j = col - tid, idx = tid; j > glbDD; j -= blockDim.x, idx += blockDim.x) t += pLi[-idx] * x[j];
#if defined(PETSC_USE_COMPLEX)
PetscReal tr = PetscRealPartComplex(t), ti = PetscImaginaryPartComplex(t);
PetscScalar tt(breduce<PetscReal, BLOCK_SIZE>(tr), breduce<PetscReal, BLOCK_SIZE>(ti));
t = tt;
#else
t = breduce<PetscReal, BLOCK_SIZE>(PetscRealPart(t));
#endif
pLi -= col - glbDD; // diagonal
if (threadIdx.x == 0) {
x[glbDD] -= t;
x[glbDD] /= pLi[0];
}
__syncthreads();
// inc past L to start of previous U
pLi -= bw + 1;
if (glbDD < bw) pLi += bw - glbDD; // overshot in top left corner
if (((locDD + 1) < bw) && field != Nf - 1) pLi -= (bw - (locDD + 1)); // skip past right corner
}
}
static PetscErrorCode MatSolve_SeqAIJCUSPARSEBAND(Mat A, Vec bb, Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
THRUSTARRAY *tempGPU = (THRUSTARRAY *)cusparseTriFactors->workVector;
PetscInt n = A->rmap->n, nz = cusparseTriFactors->nnz, Nf = 1; // Nf is batch size - not used
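  // The bandwidth below inverts nz = n + (2*n-1)*bw - bw*bw (the count set up in the
  // symbolic factorization), taking the smaller root of bw^2 - (2*n-1)*bw + (nz-n) = 0;
  // the discriminant simplifies to 1 + 4*(n*n - nz).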
  PetscInt                              bw = (int)(2. * (double)n - 1. - (double)(PetscSqrtReal(1. + 4. * ((double)n * (double)n - (double)nz)) + PETSC_MACHINE_EPSILON)) / 2; // quadratic formula for bandwidth
PetscFunctionBegin;
if (A->rmap->n == 0) PetscFunctionReturn(0);
/* Get the GPU pointers */
PetscCall(VecCUDAGetArrayWrite(xx, &xarray));
PetscCall(VecCUDAGetArrayRead(bb, &barray));
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
PetscCall(PetscLogGpuTimeBegin());
/* First, reorder with the row permutation */
thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()), tempGPU->begin());
constexpr int block = 128;
hipLaunchKernelGGL(( mat_solve_band<block>), dim3(Nf), dim3(block), 0, 0, n, bw, cusparseTriFactors->a_band_d, tempGPU->data().get());
PetscCUDACheckLaunch; // does a sync
/* Last, reorder with the column permutation */
thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()), xGPU);
PetscCall(VecCUDARestoreArrayRead(bb, &barray));
PetscCall(VecCUDARestoreArrayWrite(xx, &xarray));
PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n));
PetscCall(PetscLogGpuTimeEnd());
PetscFunctionReturn(0);
}
|
834c9a09a6046372c7e9017a6c10b31bfc8409e6.cu
|
/*
AIJCUSPARSE methods implemented with Cuda kernels. Uses cuSparse/Thrust maps from AIJCUSPARSE
*/
#define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1
#include <petscconf.h>
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/impls/sbaij/seq/sbaij.h>
#undef VecType
#include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h>
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ > 600 && PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
#define AIJBANDUSEGROUPS 1
#endif
#if defined(AIJBANDUSEGROUPS)
#include <cooperative_groups.h>
#endif
/*
LU BAND factorization with optimization for block diagonal (Nf blocks) in natural order (-mat_no_inode -pc_factor_mat_ordering_type rcm with Nf>1 fields)
requires:
structurally symmetric: fix with transpose/column meta data
*/
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSEBAND(Mat, Mat, IS, IS, const MatFactorInfo *);
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSEBAND(Mat, Mat, const MatFactorInfo *);
/*
The GPU LU factor kernel
*/
__global__ void __launch_bounds__(1024, 1) mat_lu_factor_band_init_set_i(const PetscInt n, const int bw, int bi_csr[])
{
const PetscInt Nf = gridDim.x, Nblk = gridDim.y, nloc = n / Nf;
const PetscInt field = blockIdx.x, blkIdx = blockIdx.y;
const PetscInt nloc_i = (nloc / Nblk + !!(nloc % Nblk)), start_i = field * nloc + blkIdx * nloc_i, end_i = (start_i + nloc_i) > (field + 1) * nloc ? (field + 1) * nloc : (start_i + nloc_i);
// set i (row+1)
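  // The closed form below accumulates, over rows 0..rowb: the growing lower-triangle
  // part n1L (ni*(ni-1)/2 entries until the band reaches width bw), the constant-width
  // lower part n2L once rowb > bw, the upper entries nug = i*bw minus the "clip" lost
  // where the band would run past the last column, plus i diagonal entries.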
if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0) bi_csr[0] = 0; // dummy at zero
for (int rowb = start_i + threadIdx.y; rowb < end_i; rowb += blockDim.y) { // rows in block by thread y
if (rowb < end_i && threadIdx.x == 0) {
PetscInt i = rowb + 1, ni = (rowb > bw) ? bw + 1 : i, n1L = ni * (ni - 1) / 2, nug = i * bw, n2L = bw * ((rowb > bw) ? (rowb - bw) : 0), mi = bw + rowb + 1 - n, clip = (mi > 0) ? mi * (mi - 1) / 2 + mi : 0;
bi_csr[rowb + 1] = n1L + nug - clip + n2L + i;
}
}
}
// copy AIJ to AIJ_BAND
__global__ void __launch_bounds__(1024, 1) mat_lu_factor_band_copy_aij_aij(const PetscInt n, const int bw, const PetscInt r[], const PetscInt ic[], const int ai_d[], const int aj_d[], const PetscScalar aa_d[], const int bi_csr[], PetscScalar ba_csr[])
{
const PetscInt Nf = gridDim.x, Nblk = gridDim.y, nloc = n / Nf;
const PetscInt field = blockIdx.x, blkIdx = blockIdx.y;
const PetscInt nloc_i = (nloc / Nblk + !!(nloc % Nblk)), start_i = field * nloc + blkIdx * nloc_i, end_i = (start_i + nloc_i) > (field + 1) * nloc ? (field + 1) * nloc : (start_i + nloc_i);
// zero B
if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0) ba_csr[bi_csr[n]] = 0; // flop count at end
for (int rowb = start_i + threadIdx.y; rowb < end_i; rowb += blockDim.y) { // rows in block by thread y
if (rowb < end_i) {
PetscScalar *batmp = ba_csr + bi_csr[rowb];
const PetscInt nzb = bi_csr[rowb + 1] - bi_csr[rowb];
for (int j = threadIdx.x; j < nzb; j += blockDim.x) {
if (j < nzb) batmp[j] = 0;
}
}
}
// copy A into B with CSR format -- these two loops can be fused
for (int rowb = start_i + threadIdx.y; rowb < end_i; rowb += blockDim.y) { // rows in block by thread y
if (rowb < end_i) {
const PetscInt rowa = r[rowb], nza = ai_d[rowa + 1] - ai_d[rowa];
const int *ajtmp = aj_d + ai_d[rowa], bjStart = (rowb > bw) ? rowb - bw : 0;
const PetscScalar *av = aa_d + ai_d[rowa];
PetscScalar *batmp = ba_csr + bi_csr[rowb];
/* load in initial (unfactored row) */
for (int j = threadIdx.x; j < nza; j += blockDim.x) {
if (j < nza) {
PetscInt colb = ic[ajtmp[j]], idx = colb - bjStart;
PetscScalar vala = av[j];
batmp[idx] = vala;
}
}
}
}
}
// print AIJ_BAND
__global__ void print_mat_aij_band(const PetscInt n, const int bi_csr[], const PetscScalar ba_csr[])
{
// debug
if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0) {
printf("B (AIJ) n=%d:\n", (int)n);
for (int rowb = 0; rowb < n; rowb++) {
const PetscInt nz = bi_csr[rowb + 1] - bi_csr[rowb];
const PetscScalar *batmp = ba_csr + bi_csr[rowb];
for (int j = 0; j < nz; j++) printf("(%13.6e) ", PetscRealPart(batmp[j]));
printf(" bi=%d\n", bi_csr[rowb + 1]);
}
}
}
// Band LU kernel --- ba_csr bi_csr
__global__ void __launch_bounds__(1024, 1) mat_lu_factor_band(const PetscInt n, const PetscInt bw, const int bi_csr[], PetscScalar ba_csr[], int *use_group_sync)
{
const PetscInt Nf = gridDim.x, Nblk = gridDim.y, nloc = n / Nf;
const PetscInt field = blockIdx.x, blkIdx = blockIdx.y;
const PetscInt start = field * nloc, end = start + nloc;
#if defined(AIJBANDUSEGROUPS)
auto g = cooperative_groups::this_grid();
#endif
// A22 panel update for each row A(1,:) and col A(:,1)
for (int glbDD = start, locDD = 0; glbDD < end; glbDD++, locDD++) {
PetscInt tnzUd = bw, maxU = end - 1 - glbDD; // we are chopping off the inter ears
const PetscInt nzUd = (tnzUd > maxU) ? maxU : tnzUd, dOffset = (glbDD > bw) ? bw : glbDD; // global to go past ears after first
PetscScalar *pBdd = ba_csr + bi_csr[glbDD] + dOffset;
const PetscScalar *baUd = pBdd + 1; // vector of data U(i,i+1:end)
const PetscScalar Bdd = *pBdd;
const PetscInt offset = blkIdx * blockDim.y + threadIdx.y, inc = Nblk * blockDim.y;
if (threadIdx.x == 0) {
for (int idx = offset, myi = glbDD + offset + 1; idx < nzUd; idx += inc, myi += inc) { /* assuming symmetric structure */
const PetscInt bwi = myi > bw ? bw : myi, kIdx = bwi - (myi - glbDD); // cuts off just the first (global) block
PetscScalar *Aid = ba_csr + bi_csr[myi] + kIdx;
*Aid = *Aid / Bdd;
}
}
__syncthreads(); // synch on threadIdx.x only
for (int idx = offset, myi = glbDD + offset + 1; idx < nzUd; idx += inc, myi += inc) {
const PetscInt bwi = myi > bw ? bw : myi, kIdx = bwi - (myi - glbDD); // cuts off just the first (global) block
PetscScalar *Aid = ba_csr + bi_csr[myi] + kIdx;
PetscScalar *Aij = Aid + 1;
const PetscScalar Lid = *Aid;
for (int jIdx = threadIdx.x; jIdx < nzUd; jIdx += blockDim.x) Aij[jIdx] -= Lid * baUd[jIdx];
}
#if defined(AIJBANDUSEGROUPS)
if (use_group_sync) {
g.sync();
} else {
__syncthreads();
}
#else
__syncthreads();
#endif
  } /* end of for (i=0; i<n; i++) { */
}
static PetscErrorCode MatSolve_SeqAIJCUSPARSEBAND(Mat, Vec, Vec);
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSEBAND(Mat B, Mat A, const MatFactorInfo *info)
{
Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr;
PetscCheck(cusparseTriFactors, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors");
Mat_SeqAIJCUSPARSE *cusparsestructA = (Mat_SeqAIJCUSPARSE *)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstructA;
CsrMatrix *matrixA;
const PetscInt n = A->rmap->n, *ic, *r;
const int *ai_d, *aj_d;
const PetscScalar *aa_d;
PetscScalar *ba_t = cusparseTriFactors->a_band_d;
int *bi_t = cusparseTriFactors->i_band_d;
int Ni = 10, team_size = 9, Nf = 1, nVec = 56, nconcurrent = 1, nsm = -1; // Nf is batch size - not used
PetscFunctionBegin;
if (A->rmap->n == 0) PetscFunctionReturn(0);
// cusparse setup
PetscCheck(cusparsestructA, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparsestructA");
matstructA = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestructA->mat; // matstruct->cprowIndices
PetscCheck(matstructA, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Missing mat struct");
matrixA = (CsrMatrix *)matstructA->mat;
PetscCheck(matrixA, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Missing matrix cusparsestructA->mat->mat");
// get data
ic = thrust::raw_pointer_cast(cusparseTriFactors->cpermIndices->data());
ai_d = thrust::raw_pointer_cast(matrixA->row_offsets->data());
aj_d = thrust::raw_pointer_cast(matrixA->column_indices->data());
aa_d = thrust::raw_pointer_cast(matrixA->values->data().get());
r = thrust::raw_pointer_cast(cusparseTriFactors->rpermIndices->data());
PetscCallCUDA(WaitForCUDA());
PetscCall(PetscLogGpuTimeBegin());
{
int bw = (int)(2. * (double)n - 1. - (double)(PetscSqrtReal(1. + 4. * ((double)n * (double)n - (double)b->nz)) + PETSC_MACHINE_EPSILON)) / 2, bm1 = bw - 1, nl = n / Nf;
#if !defined(AIJBANDUSEGROUPS)
Ni = 1 / nconcurrent;
Ni = 1;
#else
if (!cusparseTriFactors->init_dev_prop) {
int gpuid;
cusparseTriFactors->init_dev_prop = PETSC_TRUE;
cudaGetDevice(&gpuid);
cudaGetDeviceProperties(&cusparseTriFactors->dev_prop, gpuid);
}
nsm = cusparseTriFactors->dev_prop.multiProcessorCount;
Ni = nsm / Nf / nconcurrent;
#endif
team_size = bw / Ni + !!(bw % Ni);
nVec = PetscMin(bw, 1024 / team_size);
    PetscCall(PetscInfo(A, "Matrix Bandwidth = %d, number SMs/block = %d, num concurrency = %d, num fields = %d, numSMs/GPU = %d, thread group size = %d,%d\n", bw, Ni, nconcurrent, Nf, nsm, team_size, nVec));
{
dim3 dimBlockTeam(nVec, team_size);
dim3 dimBlockLeague(Nf, Ni);
mat_lu_factor_band_copy_aij_aij<<<dimBlockLeague, dimBlockTeam>>>(n, bw, r, ic, ai_d, aj_d, aa_d, bi_t, ba_t);
PetscCUDACheckLaunch; // does a sync
#if defined(AIJBANDUSEGROUPS)
if (Ni > 1) {
void *kernelArgs[] = {(void *)&n, (void *)&bw, (void *)&bi_t, (void *)&ba_t, (void *)&nsm};
cudaLaunchCooperativeKernel((void *)mat_lu_factor_band, dimBlockLeague, dimBlockTeam, kernelArgs, 0, NULL);
} else {
mat_lu_factor_band<<<dimBlockLeague, dimBlockTeam>>>(n, bw, bi_t, ba_t, NULL);
}
#else
mat_lu_factor_band<<<dimBlockLeague, dimBlockTeam>>>(n, bw, bi_t, ba_t, NULL);
#endif
PetscCUDACheckLaunch; // does a sync
#if defined(PETSC_USE_LOG)
PetscCall(PetscLogGpuFlops((PetscLogDouble)Nf * (bm1 * (bm1 + 1) * (PetscLogDouble)(2 * bm1 + 1) / 3 + (PetscLogDouble)2 * (nl - bw) * bw * bw + (PetscLogDouble)nl * (nl + 1) / 2)));
#endif
}
}
PetscCall(PetscLogGpuTimeEnd());
/* determine which version of MatSolve needs to be used. from MatLUFactorNumeric_AIJ_SeqAIJCUSPARSE */
B->ops->solve = MatSolve_SeqAIJCUSPARSEBAND;
B->ops->solvetranspose = NULL; // need transpose
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
PetscFunctionReturn(0);
}
PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSEBAND(Mat B, Mat A, IS isrow, IS iscol, const MatFactorInfo *info)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data, *b;
IS isicol;
const PetscInt *ic, *ai = a->i, *aj = a->j;
PetscScalar *ba_t;
int *bi_t;
PetscInt i, n = A->rmap->n, Nf = 1; // Nf batch size - not used
PetscInt nzBcsr, bwL, bwU;
PetscBool missing;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr;
PetscFunctionBegin;
PetscCheck(A->rmap->N == A->cmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "matrix must be square");
PetscCall(MatMissingDiagonal(A, &missing, &i));
PetscCheck(!missing, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Matrix is missing diagonal entry %" PetscInt_FMT, i);
PetscCheck(cusparseTriFactors, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "!cusparseTriFactors");
PetscCall(MatIsStructurallySymmetric(A, &missing));
PetscCheck(missing, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "only structurally symmetric matrices supported");
PetscCall(ISInvertPermutation(iscol, PETSC_DECIDE, &isicol));
PetscCall(ISGetIndices(isicol, &ic));
PetscCall(MatSeqAIJSetPreallocation_SeqAIJ(B, MAT_SKIP_ALLOCATION, NULL));
b = (Mat_SeqAIJ *)(B)->data;
/* get band widths, MatComputeBandwidth should take a reordering ic and do this */
bwL = bwU = 0;
for (int rwb = 0; rwb < n; rwb++) {
const PetscInt rwa = ic[rwb], anz = ai[rwb + 1] - ai[rwb], *ajtmp = aj + ai[rwb];
for (int j = 0; j < anz; j++) {
PetscInt colb = ic[ajtmp[j]];
if (colb < rwa) { // L
if (rwa - colb > bwL) bwL = rwa - colb;
} else {
if (colb - rwa > bwU) bwU = colb - rwa;
}
}
}
PetscCall(ISRestoreIndices(isicol, &ic));
  /* only structurally symmetric matrices are supported for now, though unsymmetric structure might still work */
PetscCheck(bwL == bwU, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Only symmetric structure supported (now) W_L=%" PetscInt_FMT " W_U=%" PetscInt_FMT, bwL, bwU);
PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
nzBcsr = n + (2 * n - 1) * bwU - bwU * bwU;
b->maxnz = b->nz = nzBcsr;
cusparseTriFactors->nnz = b->nz; // only meta data needed: n & nz
PetscCall(PetscInfo(A, "Matrix Bandwidth = %" PetscInt_FMT ", nnz = %" PetscInt_FMT "\n", bwL, b->nz));
if (!cusparseTriFactors->workVector) cusparseTriFactors->workVector = new THRUSTARRAY(n);
PetscCallCUDA(cudaMalloc(&ba_t, (b->nz + 1) * sizeof(PetscScalar))); // include a place for flops
PetscCallCUDA(cudaMalloc(&bi_t, (n + 1) * sizeof(int)));
cusparseTriFactors->a_band_d = ba_t;
cusparseTriFactors->i_band_d = bi_t;
/* In b structure: Free imax, ilen, old a, old j. Allocate solve_work, new a, new j */
{
dim3 dimBlockTeam(1, 128);
dim3 dimBlockLeague(Nf, 1);
mat_lu_factor_band_init_set_i<<<dimBlockLeague, dimBlockTeam>>>(n, bwU, bi_t);
}
PetscCUDACheckLaunch; // does a sync
// setup data
if (!cusparseTriFactors->rpermIndices) {
const PetscInt *r;
PetscCall(ISGetIndices(isrow, &r));
cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->rpermIndices->assign(r, r + n);
PetscCall(ISRestoreIndices(isrow, &r));
PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt)));
}
/* upper triangular indices */
if (!cusparseTriFactors->cpermIndices) {
const PetscInt *c;
PetscCall(ISGetIndices(isicol, &c));
cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->cpermIndices->assign(c, c + n);
PetscCall(ISRestoreIndices(isicol, &c));
PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt)));
}
/* put together the new matrix */
b->free_a = PETSC_FALSE;
b->free_ij = PETSC_FALSE;
b->singlemalloc = PETSC_FALSE;
b->ilen = NULL;
b->imax = NULL;
b->row = isrow;
b->col = iscol;
PetscCall(PetscObjectReference((PetscObject)isrow));
PetscCall(PetscObjectReference((PetscObject)iscol));
b->icol = isicol;
PetscCall(PetscMalloc1(n + 1, &b->solve_work));
B->factortype = MAT_FACTOR_LU;
B->info.factor_mallocs = 0;
B->info.fill_ratio_given = 0;
if (ai[n]) {
B->info.fill_ratio_needed = ((PetscReal)(nzBcsr)) / ((PetscReal)ai[n]);
} else {
B->info.fill_ratio_needed = 0.0;
}
#if defined(PETSC_USE_INFO)
if (ai[n] != 0) {
PetscReal af = B->info.fill_ratio_needed;
PetscCall(PetscInfo(A, "Band fill ratio %g\n", (double)af));
} else {
PetscCall(PetscInfo(A, "Empty matrix\n"));
}
#endif
if (a->inode.size) PetscCall(PetscInfo(A, "Warning: using inodes in band solver.\n"));
PetscCall(MatSeqAIJCheckInode_FactorLU(B));
B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSEBAND;
B->offloadmask = PETSC_OFFLOAD_GPU;
PetscFunctionReturn(0);
}
/* Use -pc_factor_mat_solver_type cusparseband */
PetscErrorCode MatFactorGetSolverType_seqaij_cusparse_band(Mat A, MatSolverType *type)
{
PetscFunctionBegin;
*type = MATSOLVERCUSPARSEBAND;
PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse_band(Mat A, MatFactorType ftype, Mat *B)
{
PetscInt n = A->rmap->n;
PetscFunctionBegin;
PetscCall(MatCreate(PetscObjectComm((PetscObject)A), B));
PetscCall(MatSetSizes(*B, n, n, n, n));
(*B)->factortype = ftype;
(*B)->canuseordering = PETSC_TRUE;
PetscCall(MatSetType(*B, MATSEQAIJCUSPARSE));
if (ftype == MAT_FACTOR_LU) {
PetscCall(MatSetBlockSizesFromMats(*B, A, A));
(*B)->ops->ilufactorsymbolic = NULL; // MatILUFactorSymbolic_SeqAIJCUSPARSE;
(*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSEBAND;
PetscCall(PetscStrallocpy(MATORDERINGRCM, (char **)&(*B)->preferredordering[MAT_FACTOR_LU]));
} else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Factor type not supported for CUSPARSEBAND Matrix Types");
PetscCall(MatSeqAIJSetPreallocation(*B, MAT_SKIP_ALLOCATION, NULL));
PetscCall(PetscObjectComposeFunction((PetscObject)(*B), "MatFactorGetSolverType_C", MatFactorGetSolverType_seqaij_cusparse_band));
PetscFunctionReturn(0);
}
#define WARP_SIZE 32
template <typename T>
__forceinline__ __device__ T wreduce(T a)
{
T b;
#pragma unroll
for (int i = WARP_SIZE / 2; i >= 1; i = i >> 1) {
b = __shfl_down_sync(0xffffffff, a, i);
a += b;
}
return a;
}
// reduce in a block, returns result in thread 0
template <typename T, int BLOCK_SIZE>
__device__ T breduce(T a)
{
constexpr int NWARP = BLOCK_SIZE / WARP_SIZE;
__shared__ double buf[NWARP];
int wid = threadIdx.x / WARP_SIZE;
int laneid = threadIdx.x % WARP_SIZE;
T b = wreduce<T>(a);
if (laneid == 0) buf[wid] = b;
__syncthreads();
if (wid == 0) {
if (threadIdx.x < NWARP) a = buf[threadIdx.x];
else a = 0;
for (int i = (NWARP + 1) / 2; i >= 1; i = i >> 1) a += __shfl_down_sync(0xffffffff, a, i);
}
return a;
}
// Band LU kernel --- ba_csr bi_csr
template <int BLOCK_SIZE>
__global__ void __launch_bounds__(256, 1) mat_solve_band(const PetscInt n, const PetscInt bw, const PetscScalar ba_csr[], PetscScalar x[])
{
const PetscInt Nf = gridDim.x, nloc = n / Nf, field = blockIdx.x, start = field * nloc, end = start + nloc, chopnz = bw * (bw + 1) / 2, blocknz = (2 * bw + 1) * nloc, blocknz_0 = blocknz - chopnz;
const PetscScalar *pLi;
const int tid = threadIdx.x;
/* Next, solve L */
pLi = ba_csr + (field == 0 ? 0 : blocknz_0 + (field - 1) * blocknz + bw); // diagonal (0,0) in field
for (int glbDD = start, locDD = 0; glbDD < end; glbDD++, locDD++) {
const PetscInt col = locDD < bw ? start : (glbDD - bw);
PetscScalar t = 0;
for (int j = col + tid, idx = tid; j < glbDD; j += blockDim.x, idx += blockDim.x) t += pLi[idx] * x[j];
#if defined(PETSC_USE_COMPLEX)
PetscReal tr = PetscRealPartComplex(t), ti = PetscImaginaryPartComplex(t);
PetscScalar tt(breduce<PetscReal, BLOCK_SIZE>(tr), breduce<PetscReal, BLOCK_SIZE>(ti));
t = tt;
#else
t = breduce<PetscReal, BLOCK_SIZE>(t);
#endif
    if (threadIdx.x == 0) x[glbDD] -= t; // no division needed: L has a unit diagonal
__syncthreads();
// inc
pLi += glbDD - col; // get to diagonal
if (glbDD > n - 1 - bw) pLi += n - 1 - glbDD; // skip over U, only last block has funny offset
else pLi += bw;
pLi += 1; // skip to next row
if (field > 0 && (locDD + 1) < bw) pLi += bw - (locDD + 1); // skip padding at beginning (ear)
}
/* Then, solve U */
pLi = ba_csr + Nf * blocknz - 2 * chopnz - 1; // end of real data on block (diagonal)
if (field != Nf - 1) pLi -= blocknz_0 + (Nf - 2 - field) * blocknz + bw; // diagonal of last local row
for (int glbDD = end - 1, locDD = 0; glbDD >= start; glbDD--, locDD++) {
const PetscInt col = (locDD < bw) ? end - 1 : glbDD + bw; // end of row in U
PetscScalar t = 0;
for (int j = col - tid, idx = tid; j > glbDD; j -= blockDim.x, idx += blockDim.x) t += pLi[-idx] * x[j];
#if defined(PETSC_USE_COMPLEX)
PetscReal tr = PetscRealPartComplex(t), ti = PetscImaginaryPartComplex(t);
PetscScalar tt(breduce<PetscReal, BLOCK_SIZE>(tr), breduce<PetscReal, BLOCK_SIZE>(ti));
t = tt;
#else
t = breduce<PetscReal, BLOCK_SIZE>(PetscRealPart(t));
#endif
pLi -= col - glbDD; // diagonal
if (threadIdx.x == 0) {
x[glbDD] -= t;
x[glbDD] /= pLi[0];
}
__syncthreads();
// inc past L to start of previous U
pLi -= bw + 1;
if (glbDD < bw) pLi += bw - glbDD; // overshot in top left corner
if (((locDD + 1) < bw) && field != Nf - 1) pLi -= (bw - (locDD + 1)); // skip past right corner
}
}
static PetscErrorCode MatSolve_SeqAIJCUSPARSEBAND(Mat A, Vec bb, Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
THRUSTARRAY *tempGPU = (THRUSTARRAY *)cusparseTriFactors->workVector;
PetscInt n = A->rmap->n, nz = cusparseTriFactors->nnz, Nf = 1; // Nf is batch size - not used
  PetscInt                              bw = (int)(2. * (double)n - 1. - (double)(PetscSqrtReal(1. + 4. * ((double)n * (double)n - (double)nz)) + PETSC_MACHINE_EPSILON)) / 2; // quadratic formula for bandwidth
PetscFunctionBegin;
if (A->rmap->n == 0) PetscFunctionReturn(0);
/* Get the GPU pointers */
PetscCall(VecCUDAGetArrayWrite(xx, &xarray));
PetscCall(VecCUDAGetArrayRead(bb, &barray));
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
PetscCall(PetscLogGpuTimeBegin());
/* First, reorder with the row permutation */
thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()), tempGPU->begin());
constexpr int block = 128;
mat_solve_band<block><<<Nf, block>>>(n, bw, cusparseTriFactors->a_band_d, tempGPU->data().get());
PetscCUDACheckLaunch; // does a sync
/* Last, reorder with the column permutation */
thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()), xGPU);
PetscCall(VecCUDARestoreArrayRead(bb, &barray));
PetscCall(VecCUDARestoreArrayWrite(xx, &xarray));
PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n));
PetscCall(PetscLogGpuTimeEnd());
PetscFunctionReturn(0);
}
|
63738a5cb12583505b93c1846d59534376edad4a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "cuda/mgard_cuda_common_internal.h"
#include "cuda/mgard_cuda_kernels.h"
#include "cuda/mgard_cuda_recompose_2d.h"
#include <fstream>
namespace mgard_cuda {
template <typename T>
void recompose_2D_cuda(mgard_cuda_handle<T> &handle, T *dv, int lddv) {
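  // Coarse-to-fine pass (inferred from the helper names): at each level l the coarse
  // values are copied to the work array, the correction is formed by mass-matrix
  // multiplies, restrictions and tridiagonal solves in each dimension, subtracted from
  // dv, and the result is prolongated back onto the finer grid.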
for (int l = handle.l_target; l > 0; --l) {
int stride = ::pow(2, l); // current stride
int Pstride = stride / 2;
copy_level(handle, handle.nrow, handle.ncol, handle.nr, handle.nc, Pstride,
Pstride, handle.dirow, handle.dicol, dv, lddv, handle.dwork,
handle.lddwork, 0);
assign_num_level(handle, handle.nrow, handle.ncol, handle.nr, handle.nc,
stride, stride, handle.dirow, handle.dicol, (T)0.0,
handle.dwork, handle.lddwork, 0);
mass_multiply_1(handle, handle.nrow, handle.ncol, handle.nr, handle.nc,
Pstride, Pstride, handle.dirow, handle.dicol,
handle.dcoords_c, handle.dwork, handle.lddwork, 0);
restriction_1(handle, handle.nrow, handle.ncol, handle.nr, handle.nc,
Pstride, Pstride, handle.dirow, handle.dicol,
handle.dcoords_c, handle.dwork, handle.lddwork, 0);
solve_tridiag_1(handle, handle.nrow, handle.ncol, handle.nr, handle.nc,
Pstride, stride, handle.dirow, handle.dicol,
handle.dcoords_c, handle.dwork, handle.lddwork, 0);
mass_multiply_2(handle, handle.nrow, handle.ncol, handle.nr, handle.nc,
Pstride, stride, handle.dirow, handle.dicol,
handle.dcoords_r, handle.dwork, handle.lddwork, 0);
restriction_2(handle, handle.nrow, handle.ncol, handle.nr, handle.nc,
Pstride, stride, handle.dirow, handle.dicol, handle.dcoords_r,
handle.dwork, handle.lddwork, 0);
solve_tridiag_2(handle, handle.nrow, handle.ncol, handle.nr, handle.nc,
stride, stride, handle.dirow, handle.dicol,
handle.dcoords_r, handle.dwork, handle.lddwork, 0);
subtract_level(handle, handle.nrow, handle.ncol, handle.nr, handle.nc,
stride, stride, handle.dirow, handle.dicol, dv, lddv,
handle.dwork, handle.lddwork, 0);
prolongate(handle, handle.nrow, handle.ncol, handle.nr, handle.nc, Pstride,
Pstride, handle.dirow, handle.dicol, handle.dcoords_r,
handle.dcoords_c, dv, lddv, 0);
}
}
template void recompose_2D_cuda<double>(mgard_cuda_handle<double> &handle,
double *dv, int lddv);
template void recompose_2D_cuda<float>(mgard_cuda_handle<float> &handle,
float *dv, int lddv);
template <typename T>
void recompose_2D_cuda_cpt(mgard_cuda_handle<T> &handle, T *dv, int lddv) {
T *dcv;
size_t dcv_pitch;
cudaMallocPitchHelper((void **)&dcv, &dcv_pitch, handle.nc * sizeof(T),
handle.nr);
int lddcv = dcv_pitch / sizeof(T);
org_to_pow2p1(handle, handle.nrow, handle.ncol, handle.nr, handle.nc,
handle.dirow, handle.dicol, dv, lddv, dcv, lddcv, 0);
for (int l = handle.l_target; l > 0; --l) {
int stride = ::pow(2, l); // current stride
int Pstride = stride / 2;
pow2p1_to_cpt_num_assign(handle, handle.nr, handle.nc, Pstride, Pstride,
(T)0.0, dcv, lddcv, handle.dwork, handle.lddwork,
0);
mass_multiply_1_cpt(handle, handle.nr_l[l - 1], handle.nc_l[l - 1], 1, 1,
handle.ddist_c_l[l - 1], handle.dwork, handle.lddwork,
0);
restriction_1_cpt(handle, handle.nr_l[l - 1], handle.nc_l[l - 1], 1, 1,
handle.ddist_c_l[l - 1], handle.dwork, handle.lddwork, 0);
solve_tridiag_1_cpt(handle, handle.nr_l[l - 1], handle.nc_l[l - 1], 1, 2,
handle.ddist_c_l[l], handle.am_col[0], handle.bm_col[0],
handle.dwork, handle.lddwork, 0);
mass_multiply_2_cpt(handle, handle.nr_l[l - 1], handle.nc_l[l - 1], 1, 2,
handle.ddist_r_l[l - 1], handle.dwork, handle.lddwork,
0);
restriction_2_cpt(handle, handle.nr_l[l - 1], handle.nc_l[l - 1], 1, 2,
handle.ddist_r_l[l - 1], handle.dwork, handle.lddwork, 0);
solve_tridiag_2_cpt(handle, handle.nr_l[l - 1], handle.nc_l[l - 1], 2, 2,
handle.ddist_r_l[l], handle.am_row[0], handle.bm_row[0],
handle.dwork, handle.lddwork, 0);
cpt_to_pow2p1_subtract(handle, handle.nr, handle.nc, 2, 2, stride, stride,
handle.dwork, handle.lddwork, dcv, lddcv, 0);
prolongate_cpt(handle, handle.nr, handle.nc, Pstride, Pstride,
handle.ddist_r_l[l - 1], handle.ddist_c_l[l - 1], dcv, lddcv,
0);
}
pow2p1_to_org(handle, handle.nrow, handle.ncol, handle.nr, handle.nc,
handle.dirow, handle.dicol, dcv, lddcv, dv, lddv, 0);
cudaFreeHelper(dcv);
}
template void recompose_2D_cuda_cpt<double>(mgard_cuda_handle<double> &handle,
double *dv, int lddv);
template void recompose_2D_cuda_cpt<float>(mgard_cuda_handle<float> &handle,
float *dv, int lddv);
} // namespace mgard_cuda
|
63738a5cb12583505b93c1846d59534376edad4a.cu
|
#include "cuda/mgard_cuda_common_internal.h"
#include "cuda/mgard_cuda_kernels.h"
#include "cuda/mgard_cuda_recompose_2d.h"
#include <fstream>
namespace mgard_cuda {
template <typename T>
void recompose_2D_cuda(mgard_cuda_handle<T> &handle, T *dv, int lddv) {
for (int l = handle.l_target; l > 0; --l) {
int stride = std::pow(2, l); // current stride
int Pstride = stride / 2;
copy_level(handle, handle.nrow, handle.ncol, handle.nr, handle.nc, Pstride,
Pstride, handle.dirow, handle.dicol, dv, lddv, handle.dwork,
handle.lddwork, 0);
assign_num_level(handle, handle.nrow, handle.ncol, handle.nr, handle.nc,
stride, stride, handle.dirow, handle.dicol, (T)0.0,
handle.dwork, handle.lddwork, 0);
mass_multiply_1(handle, handle.nrow, handle.ncol, handle.nr, handle.nc,
Pstride, Pstride, handle.dirow, handle.dicol,
handle.dcoords_c, handle.dwork, handle.lddwork, 0);
restriction_1(handle, handle.nrow, handle.ncol, handle.nr, handle.nc,
Pstride, Pstride, handle.dirow, handle.dicol,
handle.dcoords_c, handle.dwork, handle.lddwork, 0);
solve_tridiag_1(handle, handle.nrow, handle.ncol, handle.nr, handle.nc,
Pstride, stride, handle.dirow, handle.dicol,
handle.dcoords_c, handle.dwork, handle.lddwork, 0);
mass_multiply_2(handle, handle.nrow, handle.ncol, handle.nr, handle.nc,
Pstride, stride, handle.dirow, handle.dicol,
handle.dcoords_r, handle.dwork, handle.lddwork, 0);
restriction_2(handle, handle.nrow, handle.ncol, handle.nr, handle.nc,
Pstride, stride, handle.dirow, handle.dicol, handle.dcoords_r,
handle.dwork, handle.lddwork, 0);
solve_tridiag_2(handle, handle.nrow, handle.ncol, handle.nr, handle.nc,
stride, stride, handle.dirow, handle.dicol,
handle.dcoords_r, handle.dwork, handle.lddwork, 0);
subtract_level(handle, handle.nrow, handle.ncol, handle.nr, handle.nc,
stride, stride, handle.dirow, handle.dicol, dv, lddv,
handle.dwork, handle.lddwork, 0);
prolongate(handle, handle.nrow, handle.ncol, handle.nr, handle.nc, Pstride,
Pstride, handle.dirow, handle.dicol, handle.dcoords_r,
handle.dcoords_c, dv, lddv, 0);
}
}
template void recompose_2D_cuda<double>(mgard_cuda_handle<double> &handle,
double *dv, int lddv);
template void recompose_2D_cuda<float>(mgard_cuda_handle<float> &handle,
float *dv, int lddv);
template <typename T>
void recompose_2D_cuda_cpt(mgard_cuda_handle<T> &handle, T *dv, int lddv) {
T *dcv;
size_t dcv_pitch;
cudaMallocPitchHelper((void **)&dcv, &dcv_pitch, handle.nc * sizeof(T),
handle.nr);
int lddcv = dcv_pitch / sizeof(T);
org_to_pow2p1(handle, handle.nrow, handle.ncol, handle.nr, handle.nc,
handle.dirow, handle.dicol, dv, lddv, dcv, lddcv, 0);
for (int l = handle.l_target; l > 0; --l) {
int stride = std::pow(2, l); // current stride
int Pstride = stride / 2;
pow2p1_to_cpt_num_assign(handle, handle.nr, handle.nc, Pstride, Pstride,
(T)0.0, dcv, lddcv, handle.dwork, handle.lddwork,
0);
mass_multiply_1_cpt(handle, handle.nr_l[l - 1], handle.nc_l[l - 1], 1, 1,
handle.ddist_c_l[l - 1], handle.dwork, handle.lddwork,
0);
restriction_1_cpt(handle, handle.nr_l[l - 1], handle.nc_l[l - 1], 1, 1,
handle.ddist_c_l[l - 1], handle.dwork, handle.lddwork, 0);
solve_tridiag_1_cpt(handle, handle.nr_l[l - 1], handle.nc_l[l - 1], 1, 2,
handle.ddist_c_l[l], handle.am_col[0], handle.bm_col[0],
handle.dwork, handle.lddwork, 0);
mass_multiply_2_cpt(handle, handle.nr_l[l - 1], handle.nc_l[l - 1], 1, 2,
handle.ddist_r_l[l - 1], handle.dwork, handle.lddwork,
0);
restriction_2_cpt(handle, handle.nr_l[l - 1], handle.nc_l[l - 1], 1, 2,
handle.ddist_r_l[l - 1], handle.dwork, handle.lddwork, 0);
solve_tridiag_2_cpt(handle, handle.nr_l[l - 1], handle.nc_l[l - 1], 2, 2,
handle.ddist_r_l[l], handle.am_row[0], handle.bm_row[0],
handle.dwork, handle.lddwork, 0);
cpt_to_pow2p1_subtract(handle, handle.nr, handle.nc, 2, 2, stride, stride,
handle.dwork, handle.lddwork, dcv, lddcv, 0);
prolongate_cpt(handle, handle.nr, handle.nc, Pstride, Pstride,
handle.ddist_r_l[l - 1], handle.ddist_c_l[l - 1], dcv, lddcv,
0);
}
pow2p1_to_org(handle, handle.nrow, handle.ncol, handle.nr, handle.nc,
handle.dirow, handle.dicol, dcv, lddcv, dv, lddv, 0);
cudaFreeHelper(dcv);
}
template void recompose_2D_cuda_cpt<double>(mgard_cuda_handle<double> &handle,
double *dv, int lddv);
template void recompose_2D_cuda_cpt<float>(mgard_cuda_handle<float> &handle,
float *dv, int lddv);
} // namespace mgard_cuda
|
1b7096965f24b9bdd64b0c0a319f901f9bcaac37.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::host_vector<int> hv_in(idata, idata + n);
thrust::host_vector<int> hv_out(odata, odata + n);
thrust::device_vector<int> dv_in = hv_in;
thrust::device_vector<int> dv_out = hv_out;
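            // note: the host->device copies above happen before startGpuTimer, so the
            // timed region below covers only the device-side exclusive_scan.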
timer().startGpuTimer();
// example: for device_vectors dv_in and dv_out:
// thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
thrust::exclusive_scan(std::begin(dv_in),
std::end(dv_in),
std::begin(dv_out));
// thrust::exclusive_scan(idata,
// idata + n,
// odata);
timer().endGpuTimer();
hv_out = dv_out;
memcpy(odata, &hv_out[0], n * sizeof(int));
}
}
}
|
1b7096965f24b9bdd64b0c0a319f901f9bcaac37.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::host_vector<int> hv_in(idata, idata + n);
thrust::host_vector<int> hv_out(odata, odata + n);
thrust::device_vector<int> dv_in = hv_in;
thrust::device_vector<int> dv_out = hv_out;
timer().startGpuTimer();
// example: for device_vectors dv_in and dv_out:
// thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
thrust::exclusive_scan(std::begin(dv_in),
std::end(dv_in),
std::begin(dv_out));
// thrust::exclusive_scan(idata,
// idata + n,
// odata);
timer().endGpuTimer();
hv_out = dv_out;
memcpy(odata, &hv_out[0], n * sizeof(int));
}
}
}
|
44ab8a072201e35d56668ac1d3ef936578b0f7aa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/top_k_kernel.h"
#include "paddle/fluid/operators/top_k_function_cuda.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/copy_kernel.h"
#include "paddle/phi/kernels/funcs/gather.cu.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace phi {
namespace ops = paddle::operators;
#define FIXED_BLOCK_DIM_BASE(dim, ...) \
case (dim): { \
constexpr auto kBlockDim = (dim); \
__VA_ARGS__; \
} break
#define FIXED_BLOCK_DIM(...) \
FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__)
template <typename T, typename Context>
void TopkKernel(const Context& dev_ctx,
const DenseTensor& x,
const Scalar& k_scalar,
int axis,
bool largest,
bool sorted,
DenseTensor* out,
DenseTensor* indices) {
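  // Dispatch summary (as implemented below): on the last axis, large k relative to the
  // row width goes through cub-based SortTopk, very wide 1-D inputs may use RadixTopK,
  // and everything else falls back to the block-wise KeMatrixTopK kernel; any other
  // axis is handled by transposing so the target axis becomes the last one, computing
  // topK there, and transposing the results back.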
const auto* input = &x;
// get the input dims
const auto& in_dims = input->dims();
  // calculate the real axis
if (axis < 0) axis += in_dims.size();
int k = k_scalar.to<int>();
if (k_scalar.FromTensor()) {
phi::DDim out_dims = out->dims();
out_dims[axis] = k;
out->Resize(out_dims);
indices->Resize(out_dims);
}
const auto& out_dims = out->dims();
const T* input_data = input->data<T>();
T* output_data = dev_ctx.template Alloc<T>(out);
int64_t* indices_data = dev_ctx.template Alloc<int64_t>(indices);
if (axis == in_dims.size() - 1) {
// if get the topK from the last axis
const int64_t& input_height =
phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1));
const int64_t& input_width = in_dims[in_dims.size() - 1];
if (k > input_width) {
k = input_width;
}
// The conclusion is drawn from the data through multiple sets of
// statistics
if (input_width >= 128 && k >= input_width * 0.75) {
auto* ctx = reinterpret_cast<const paddle::platform::CUDADeviceContext*>(
&dev_ctx);
if (ops::SortTopk<T>(*ctx,
input,
input_width,
input_height,
k,
out,
indices,
largest)) {
        // Succeeded, return.
return;
} else {
VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
}
#if defined(PADDLE_WITH_CUDA) && TORCH_HIP_VERSION >= 9000
if (input_width >= 1024 && in_dims.size() == 1) {
// 1. Gather TopK, but without sorting
constexpr int max_num_threads = 1024;
if (largest) {
hipLaunchKernelGGL(( ops::RadixTopK<
T,
true>), dim3(input_height), dim3(max_num_threads), 0, dev_ctx.stream(),
input_data,
k,
input_height,
input_width,
output_data,
indices_data);
} else {
hipLaunchKernelGGL(( ops::RadixTopK<
T,
false>), dim3(input_height), dim3(max_num_threads), 0, dev_ctx.stream(),
input_data,
k,
input_height,
input_width,
output_data,
indices_data);
}
// 2. Sort if needed
if (sorted) {
DenseTensor sorted_output;
DenseTensor sorted_indices;
DenseTensor gather_indices;
sorted_output.Resize(out->dims());
sorted_indices.Resize(indices->dims());
gather_indices.Resize(indices->dims());
dev_ctx.template Alloc<T>(&sorted_output);
dev_ctx.template Alloc<int64_t>(&sorted_indices);
dev_ctx.template Alloc<int64_t>(&gather_indices);
auto* ctx =
reinterpret_cast<const paddle::platform::CUDADeviceContext*>(
&dev_ctx);
if (ops::SortTopk<T>(*ctx,
out,
k,
input_height,
k,
&sorted_output,
&sorted_indices,
largest)) {
funcs::GPUGather<int64_t, int64_t>(
dev_ctx, *indices, sorted_indices, &gather_indices);
Copy(dev_ctx, gather_indices, indices->place(), false, indices);
Copy(dev_ctx, sorted_output, out->place(), false, out);
return;
} else {
VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
} else {
return;
}
}
#endif
// NOTE: pass lds and dim same to input width.
// NOTE: old matrix implementation of stride is different to eigen.
const int kMaxHeight = 2048;
int gridx = input_height < kMaxHeight ? input_height : kMaxHeight;
switch (ops::GetDesiredBlockDim(input_width)) {
#ifdef PADDLE_WITH_HIP
hipLaunchKernelGGL(( FIXED_BLOCK_DIM(ops::KeMatrixTopK<
T,
20,
kBlockDim>), dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(),
output_data,
k,
indices_data,
input_data,
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
#else
hipLaunchKernelGGL(( FIXED_BLOCK_DIM(ops::KeMatrixTopK<
T,
5,
kBlockDim>), dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(),
output_data,
k,
indices_data,
input_data,
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
#endif
default:
PADDLE_THROW(errors::Fatal(
"the input data shape has error in the topk cuda kernel."));
}
} else {
    // if the topK is not taken from the last axis, transpose the tensor so the
    // target axis becomes the last one, then compute topK
    // first step, prepare the trans args for the transpose
std::vector<int> trans;
for (int i = 0; i < axis; i++) {
trans.emplace_back(i);
}
trans.emplace_back(in_dims.size() - 1);
for (int i = axis + 1; i < in_dims.size() - 1; i++) {
trans.emplace_back(i);
}
trans.emplace_back(axis);
phi::DDim trans_dims(in_dims);
phi::DDim trans_out_dims(out->dims());
for (int i = 0; i < trans.size(); i++) {
trans_dims[i] = in_dims[trans[i]];
trans_out_dims[i] = out_dims[trans[i]];
}
    // second step, transpose the input
DenseTensor trans_input;
trans_input.Resize(trans_dims);
dev_ctx.template Alloc<T>(&trans_input);
int ndims = trans.size();
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, *input, &trans_input, trans);
    // third step, calculate the topk
// allocate the tmp cuda memory for the tmp result
DenseTensor trans_ind;
DenseTensor trans_out;
trans_ind.Resize(trans_out_dims);
trans_out.Resize(trans_out_dims);
dev_ctx.template Alloc<int64_t>(&trans_ind);
dev_ctx.template Alloc<T>(&trans_out);
const int64_t input_height =
phi::product(phi::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
const int64_t input_width = trans_dims[trans_dims.size() - 1];
if (k > input_width) k = input_width;
// The conclusion is drawn from the data through multiple sets of
// statistics
if (input_width >= 128 && k >= input_width * 0.75) {
auto* ctx = reinterpret_cast<const paddle::platform::CUDADeviceContext*>(
&dev_ctx);
if (ops::SortTopk<T>(*ctx,
&trans_input,
input_width,
input_height,
k,
&trans_out,
&trans_ind,
largest)) {
        // last step, transpose back the indices and output
funcs::TransCompute<phi::GPUContext, int64_t>(
ndims, dev_ctx, trans_ind, indices, trans);
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, trans_out, out, trans);
return;
} else {
VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
}
const int kMaxHeight = 2048;
int gridx = input_height < kMaxHeight ? input_height : kMaxHeight;
switch (ops::GetDesiredBlockDim(input_width)) {
#ifdef PADDLE_WITH_HIP
hipLaunchKernelGGL(( FIXED_BLOCK_DIM(ops::KeMatrixTopK<
T,
20,
kBlockDim>), dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(),
trans_out.data<T>(),
k,
trans_ind.data<int64_t>(),
trans_input.data<T>(),
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
#else
hipLaunchKernelGGL(( FIXED_BLOCK_DIM(ops::KeMatrixTopK<
T,
5,
kBlockDim>), dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(),
trans_out.data<T>(),
k,
trans_ind.data<int64_t>(),
trans_input.data<T>(),
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
#endif
default:
PADDLE_THROW(errors::Fatal(
"the input data shape has error in the topk cuda kernel."));
}
    // last step, transpose back the indices and output
funcs::TransCompute<phi::GPUContext, int64_t>(
ndims, dev_ctx, trans_ind, indices, trans);
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, trans_out, out, trans);
}
}
#undef FIXED_BLOCK_DIM_BASE
#undef FIXED_BLOCK_DIM
} // namespace phi
PD_REGISTER_KERNEL(top_k,
GPU,
ALL_LAYOUT,
phi::TopkKernel,
float,
double,
int,
int64_t,
phi::dtype::float16) {}
|
44ab8a072201e35d56668ac1d3ef936578b0f7aa.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/top_k_kernel.h"
#include "paddle/fluid/operators/top_k_function_cuda.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/copy_kernel.h"
#include "paddle/phi/kernels/funcs/gather.cu.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace phi {
namespace ops = paddle::operators;
#define FIXED_BLOCK_DIM_BASE(dim, ...) \
case (dim): { \
constexpr auto kBlockDim = (dim); \
__VA_ARGS__; \
} break
#define FIXED_BLOCK_DIM(...) \
FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__)
template <typename T, typename Context>
void TopkKernel(const Context& dev_ctx,
const DenseTensor& x,
const Scalar& k_scalar,
int axis,
bool largest,
bool sorted,
DenseTensor* out,
DenseTensor* indices) {
const auto* input = &x;
// get the input dims
const auto& in_dims = input->dims();
  // calculate the real axis
if (axis < 0) axis += in_dims.size();
int k = k_scalar.to<int>();
if (k_scalar.FromTensor()) {
phi::DDim out_dims = out->dims();
out_dims[axis] = k;
out->Resize(out_dims);
indices->Resize(out_dims);
}
const auto& out_dims = out->dims();
const T* input_data = input->data<T>();
T* output_data = dev_ctx.template Alloc<T>(out);
int64_t* indices_data = dev_ctx.template Alloc<int64_t>(indices);
if (axis == in_dims.size() - 1) {
    // if the topK is taken from the last axis
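    // view the tensor as a [input_height, input_width] matrix whose rows are
    // the slices along the last axis; top-k is computed independently per row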
const int64_t& input_height =
phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1));
const int64_t& input_width = in_dims[in_dims.size() - 1];
if (k > input_width) {
k = input_width;
}
    // These thresholds were chosen empirically, based on statistics gathered
    // over multiple sets of data.
if (input_width >= 128 && k >= input_width * 0.75) {
auto* ctx = reinterpret_cast<const paddle::platform::CUDADeviceContext*>(
&dev_ctx);
if (ops::SortTopk<T>(*ctx,
input,
input_width,
input_height,
k,
out,
indices,
largest)) {
        // Succeeded, return.
return;
} else {
VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
}
#if defined(PADDLE_WITH_CUDA) && CUDA_VERSION >= 9000
if (input_width >= 1024 && in_dims.size() == 1) {
// 1. Gather TopK, but without sorting
constexpr int max_num_threads = 1024;
if (largest) {
ops::RadixTopK<
T,
true><<<input_height, max_num_threads, 0, dev_ctx.stream()>>>(
input_data,
k,
input_height,
input_width,
output_data,
indices_data);
} else {
ops::RadixTopK<
T,
false><<<input_height, max_num_threads, 0, dev_ctx.stream()>>>(
input_data,
k,
input_height,
input_width,
output_data,
indices_data);
}
// 2. Sort if needed
if (sorted) {
DenseTensor sorted_output;
DenseTensor sorted_indices;
DenseTensor gather_indices;
sorted_output.Resize(out->dims());
sorted_indices.Resize(indices->dims());
gather_indices.Resize(indices->dims());
dev_ctx.template Alloc<T>(&sorted_output);
dev_ctx.template Alloc<int64_t>(&sorted_indices);
dev_ctx.template Alloc<int64_t>(&gather_indices);
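        // SortTopk below fully sorts the top-k row held in `out`; the returned
        // `sorted_indices` are positions within that unsorted row, so gathering
        // the original `indices` with them recovers indices into the input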
auto* ctx =
reinterpret_cast<const paddle::platform::CUDADeviceContext*>(
&dev_ctx);
if (ops::SortTopk<T>(*ctx,
out,
k,
input_height,
k,
&sorted_output,
&sorted_indices,
largest)) {
funcs::GPUGather<int64_t, int64_t>(
dev_ctx, *indices, sorted_indices, &gather_indices);
Copy(dev_ctx, gather_indices, indices->place(), false, indices);
Copy(dev_ctx, sorted_output, out->place(), false, out);
return;
} else {
VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
} else {
return;
}
}
#endif
    // NOTE: lds and dim are both passed as input_width.
    // NOTE: the old matrix implementation uses a stride convention different
    // from Eigen's.
const int kMaxHeight = 2048;
int gridx = input_height < kMaxHeight ? input_height : kMaxHeight;
switch (ops::GetDesiredBlockDim(input_width)) {
#ifdef PADDLE_WITH_HIP
FIXED_BLOCK_DIM(ops::KeMatrixTopK<
T,
20,
kBlockDim><<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(
output_data,
k,
indices_data,
input_data,
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
#else
FIXED_BLOCK_DIM(ops::KeMatrixTopK<
T,
5,
kBlockDim><<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(
output_data,
k,
indices_data,
input_data,
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
#endif
default:
PADDLE_THROW(errors::Fatal(
"the input data shape has error in the topk cuda kernel."));
}
} else {
    // if the topK is not taken from the last axis, transpose the tensor so
    // that the target axis becomes the last one, then compute TopK
    // first step, prepare the trans args for the transpose
std::vector<int> trans;
for (int i = 0; i < axis; i++) {
trans.emplace_back(i);
}
trans.emplace_back(in_dims.size() - 1);
for (int i = axis + 1; i < in_dims.size() - 1; i++) {
trans.emplace_back(i);
}
trans.emplace_back(axis);
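    // e.g. with in_dims.size() == 4 and axis == 1 this yields
    // trans = {0, 3, 2, 1}: the target axis is swapped with the last one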
phi::DDim trans_dims(in_dims);
phi::DDim trans_out_dims(out->dims());
for (int i = 0; i < trans.size(); i++) {
trans_dims[i] = in_dims[trans[i]];
trans_out_dims[i] = out_dims[trans[i]];
}
    // second step, transpose the input
DenseTensor trans_input;
trans_input.Resize(trans_dims);
dev_ctx.template Alloc<T>(&trans_input);
int ndims = trans.size();
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, *input, &trans_input, trans);
    // third step, calculate the topk
    // allocate temporary cuda memory for the intermediate results
DenseTensor trans_ind;
DenseTensor trans_out;
trans_ind.Resize(trans_out_dims);
trans_out.Resize(trans_out_dims);
dev_ctx.template Alloc<int64_t>(&trans_ind);
dev_ctx.template Alloc<T>(&trans_out);
const int64_t input_height =
phi::product(phi::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
const int64_t input_width = trans_dims[trans_dims.size() - 1];
if (k > input_width) k = input_width;
    // These thresholds were chosen empirically, based on statistics gathered
    // over multiple sets of data.
if (input_width >= 128 && k >= input_width * 0.75) {
auto* ctx = reinterpret_cast<const paddle::platform::CUDADeviceContext*>(
&dev_ctx);
if (ops::SortTopk<T>(*ctx,
&trans_input,
input_width,
input_height,
k,
&trans_out,
&trans_ind,
largest)) {
      // last step, transpose back the indices and output
funcs::TransCompute<phi::GPUContext, int64_t>(
ndims, dev_ctx, trans_ind, indices, trans);
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, trans_out, out, trans);
return;
} else {
VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
}
const int kMaxHeight = 2048;
int gridx = input_height < kMaxHeight ? input_height : kMaxHeight;
switch (ops::GetDesiredBlockDim(input_width)) {
#ifdef PADDLE_WITH_HIP
FIXED_BLOCK_DIM(ops::KeMatrixTopK<
T,
20,
kBlockDim><<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(
trans_out.data<T>(),
k,
trans_ind.data<int64_t>(),
trans_input.data<T>(),
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
#else
FIXED_BLOCK_DIM(ops::KeMatrixTopK<
T,
5,
kBlockDim><<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(
trans_out.data<T>(),
k,
trans_ind.data<int64_t>(),
trans_input.data<T>(),
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
#endif
default:
PADDLE_THROW(errors::Fatal(
"the input data shape has error in the topk cuda kernel."));
}
    // last step, transpose back the indices and output
funcs::TransCompute<phi::GPUContext, int64_t>(
ndims, dev_ctx, trans_ind, indices, trans);
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, trans_out, out, trans);
}
}
#undef FIXED_BLOCK_DIM_BASE
#undef FIXED_BLOCK_DIM
} // namespace phi
PD_REGISTER_KERNEL(top_k,
GPU,
ALL_LAYOUT,
phi::TopkKernel,
float,
double,
int,
int64_t,
phi::dtype::float16) {}
|
9fd7e30177ce504763a0ffeaa045b9ba7e1b8b74.hip
|
// !!! This is a file automatically generated by hipify!!!
/*******************************************************************
c* Multimodal Deformable Image Registration *
c*     via Mutual Information or Bhattacharyya Distance           *
c* Version: 1.0 *
c* Language: C, CUDA *
c* *
c* Developer: Yifei Lou *
c* Email: [email protected] *
c* *
c* School of Electrical and Computer Engineering *
c* Georgia Institute of Technology *
c* Atlanta, GA, 30318 *
c* Website: http://groups.bme.gatech.edu/groups/bil/ *
c* *
c* Copyright (c) 2011 *
c* All rights reserved. *
c* *
c* Permission to use, copy, or modify this code and its *
c* documentation for scientific purpose is hereby granted *
c* without fee, provided that this copyright notice appear in *
c* all copies and that both that copyright notice and this *
c* permission notice appear in supporting documentation. The use *
c* for commercial purposes is prohibited without permission. *
c* *
c* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND *
c* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, *
c* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF *
c* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *
c* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR *
c* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *
c* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT *
c* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF*
c* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED *
c* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT *
c* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
c* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF *
c* THE POSSIBILITY OF SUCH DAMAGE. *
c* *
c******************************************************************/
/*******************************************************************
c*      Short description                                         *
c* main code of the multi-modal deformable registration *
c* it calls all the other components *
c******************************************************************/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <iostream>
// includes, global variables
#include "global.h"
#include "convolution.hip"
// includes, project
#include <cutil_inline.h>
#include <rocblas.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/binary_search.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/random.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <hip/hip_runtime.h> // for float2
using namespace std;
using namespace thrust;
// include files
#include "initialize.cu"
#include "funcHistogram.cu"
#include "funcImageDomain.cu"
#include "compute.cu"
#include "finalize.cu"
/****************************************************
main program
****************************************************/
int main( int argc, char** argv)
{
cout << endl << "****************************************" << endl;
cout << "Computation parameters..." << endl;
cout << "****************************************" << endl ;
int device = DEVICENUMBER;
// device number
hipSetDevice(device);
cout << "Using device # " << device << endl;
// choose which device to use
hipGetDeviceCount(&deviceCount);
hipGetDeviceProperties(&dP,device);
cout<<"Max threads per block: "<<dP.maxThreadsPerBlock<<endl;
cout<<"Max Threads DIM: "<<dP.maxThreadsDim[0]<<" x "<<dP.maxThreadsDim[1]<<" x "<<dP.maxThreadsDim[2]<<endl;
cout<<"Max Grid Size: "<<dP.maxGridSize[0]<<" x "<<dP.maxGridSize[1]<<" x "<<dP.maxGridSize[2]<<endl;
printf("Device %d: \"%s\" with Compute %d.%d capability\n",
device, dP.name, dP.major, dP.minor);
// obtain computing resource
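	// enough blocks to cover all nBin*nBin histogram bins (presumably the
	// joint histogram used by the MI / Bhattacharyya measures):
	// ceil(nBin*nBin / NTHREAD_PER_BLOCK) blocks, folded into a grid that is
	// NBLOCKX blocks wide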
nblocks_hist.x = NBLOCKX;
nblocks_hist.y = ((1 + (nBin*nBin - 1)/NTHREAD_PER_BLOCK) - 1) / NBLOCKX + 1;
cout << endl << "****************************************" << endl;
cout << "Computing starts..." << endl;
cout << "****************************************" << endl << endl;
// mark the start total time timer
unsigned int totalTimer = 0;
cutilCheckError( cutCreateTimer( &totalTimer));
cutilCheckError( cutStartTimer( totalTimer));
/******************************************************
initialize
******************************************************/
cout << "\n\n";
cout << "Initializing MI 3Dreg program...\n\n";
////// CUBLAS initialization ////////////////////////////
cout << "Initializing CUBLAS..." << endl;
cublasStatus status = hipblasInit();
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf (stderr, "!!!! CUBLAS initialization error\n");
getchar();
exit(0);
}
// initialize CUBLAS
initData();
initGaussKernel();
/******************************************************
start iterations
******************************************************/
unsigned int timer = 0;
cutilCheckError( cutCreateTimer( &timer));
cutilCheckError( cutStartTimer( timer));
// mark the start time
cout << "\n\n";
cout << "Performing registration...\n\n";
for(int scale = NSCALE-1; scale >=0; scale--)
{
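		// coarse-to-fine pyramid: image dimensions are (integer-)halved at each
		// coarser scale, so scale = 0 is the full-resolution level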
NX = NX0/pow(2, scale);
NY = NY0/pow(2, scale);
NZ = (NZ0-1)/pow(2, scale) +1;
sDATA_SIZE = (NX*NY*NZ)* sizeof(float);
nblocks.x = NBLOCKX;
nblocks.y = ((1 + (NX*NY*NZ - 1)/NTHREAD_PER_BLOCK) - 1) / NBLOCKX + 1;
printf("current scale = %d, size of image = %d x %d x %d ... \n", scale, NX, NY, NZ);
if(scale<NSCALE-1)
{
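			// upsample the motion field estimated at the previous (coarser)
			// scale to initialize the current, finer scale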
hipLaunchKernelGGL(( upSample), dim3(nblocks), dim3(NTHREAD_PER_BLOCK), 0, 0, d_mv_x[scale+1], d_mv_x[scale], NX, NY, NZ);
hipLaunchKernelGGL(( upSample), dim3(nblocks), dim3(NTHREAD_PER_BLOCK), 0, 0, d_mv_y[scale+1], d_mv_y[scale], NX, NY, NZ);
hipLaunchKernelGGL(( upSample), dim3(nblocks), dim3(NTHREAD_PER_BLOCK), 0, 0, d_mv_z[scale+1], d_mv_z[scale], NX, NY, NZ);
}
compute(d_im_move[scale], d_im_static[scale], d_mv_x[scale], d_mv_y[scale], d_mv_z[scale], MAX_ITER);
printf("\n\n");
}
hipDeviceSynchronize();
cutilCheckError( cutStopTimer( timer));
printf("\n\n****************************************\n");
printf( "Computing time: %f (ms)\n", cutGetTimerValue( timer));
printf("****************************************\n\n\n");
cutilCheckError( cutDeleteTimer( timer));
// mark the end timer and print
/******************************************************
finalize
******************************************************/
printf("Finalizing program...\n\n");
fina();
/**** shut down CUBLAS *******/
status = hipblasShutdown();
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf (stderr, "!!!! shutdown error (A)\n");
getchar();
exit(0);
}
// Shut down CUBLAS
hipDeviceSynchronize();
// mark the end total timer
cutilCheckError( cutStopTimer( totalTimer));
printf("\n\n****************************************\n");
printf( "Entire program time: %f (ms)\n", cutGetTimerValue( totalTimer));
printf("****************************************\n\n\n");
cutilCheckError( cutDeleteTimer( totalTimer));
printf("Have a nice day!\n");
hipDeviceReset();
cutilExit(argc, argv);
return 0;
}
|
9fd7e30177ce504763a0ffeaa045b9ba7e1b8b74.cu
|
/*******************************************************************
c* Multimodal Deformable Image Registration *
c*     via Mutual Information or Bhattacharyya Distance           *
c* Version: 1.0 *
c* Language: C, CUDA *
c* *
c* Developer: Yifei Lou *
c* Email: [email protected] *
c* *
c* School of Electrical and Computer Engineering *
c* Georgia Institute of Technology *
c* Atlanta, GA, 30318 *
c* Website: http://groups.bme.gatech.edu/groups/bil/ *
c* *
c* Copyright (c) 2011 *
c* All rights reserved. *
c* *
c* Permission to use, copy, or modify this code and its *
c* documentation for scientific purpose is hereby granted *
c* without fee, provided that this copyright notice appear in *
c* all copies and that both that copyright notice and this *
c* permission notice appear in supporting documentation. The use *
c* for commercial purposes is prohibited without permission. *
c* *
c* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND *
c* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, *
c* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF *
c* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *
c* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR *
c* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *
c* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT *
c* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF*
c* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED *
c* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT *
c* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
c* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF *
c* THE POSSIBILITY OF SUCH DAMAGE. *
c* *
c******************************************************************/
/*******************************************************************
c*      Short description                                         *
c* main code of the multi-modal deformable registration *
c* it calls all the other components *
c******************************************************************/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <iostream>
// includes, global variables
#include "global.h"
#include "convolution.cu"
// includes, project
#include <cutil_inline.h>
#include <cublas.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/binary_search.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/random.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <cuda.h> // for float2
using namespace std;
using namespace thrust;
// include files
#include "initialize.cu"
#include "funcHistogram.cu"
#include "funcImageDomain.cu"
#include "compute.cu"
#include "finalize.cu"
/****************************************************
main program
****************************************************/
int main( int argc, char** argv)
{
cout << endl << "****************************************" << endl;
cout << "Computation parameters..." << endl;
cout << "****************************************" << endl ;
int device = DEVICENUMBER;
// device number
cudaSetDevice(device);
cout << "Using device # " << device << endl;
// choose which device to use
cudaGetDeviceCount(&deviceCount);
cudaGetDeviceProperties(&dP,device);
cout<<"Max threads per block: "<<dP.maxThreadsPerBlock<<endl;
cout<<"Max Threads DIM: "<<dP.maxThreadsDim[0]<<" x "<<dP.maxThreadsDim[1]<<" x "<<dP.maxThreadsDim[2]<<endl;
cout<<"Max Grid Size: "<<dP.maxGridSize[0]<<" x "<<dP.maxGridSize[1]<<" x "<<dP.maxGridSize[2]<<endl;
printf("Device %d: \"%s\" with Compute %d.%d capability\n",
device, dP.name, dP.major, dP.minor);
// obtain computing resource
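	// enough blocks to cover all nBin*nBin histogram bins (presumably the
	// joint histogram used by the MI / Bhattacharyya measures):
	// ceil(nBin*nBin / NTHREAD_PER_BLOCK) blocks, folded into a grid that is
	// NBLOCKX blocks wide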
nblocks_hist.x = NBLOCKX;
nblocks_hist.y = ((1 + (nBin*nBin - 1)/NTHREAD_PER_BLOCK) - 1) / NBLOCKX + 1;
cout << endl << "****************************************" << endl;
cout << "Computing starts..." << endl;
cout << "****************************************" << endl << endl;
// mark the start total time timer
unsigned int totalTimer = 0;
cutilCheckError( cutCreateTimer( &totalTimer));
cutilCheckError( cutStartTimer( totalTimer));
/******************************************************
initialize
******************************************************/
cout << "\n\n";
cout << "Initializing MI 3Dreg program...\n\n";
////// CUBLAS initialization ////////////////////////////
cout << "Initializing CUBLAS..." << endl;
cublasStatus status = cublasInit();
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf (stderr, "!!!! CUBLAS initialization error\n");
getchar();
exit(0);
}
// initialize CUBLAS
initData();
initGaussKernel();
/******************************************************
start iterations
******************************************************/
unsigned int timer = 0;
cutilCheckError( cutCreateTimer( &timer));
cutilCheckError( cutStartTimer( timer));
// mark the start time
cout << "\n\n";
cout << "Performing registration...\n\n";
for(int scale = NSCALE-1; scale >=0; scale--)
{
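		// coarse-to-fine pyramid: image dimensions are (integer-)halved at each
		// coarser scale, so scale = 0 is the full-resolution level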
NX = NX0/pow(2, scale);
NY = NY0/pow(2, scale);
NZ = (NZ0-1)/pow(2, scale) +1;
sDATA_SIZE = (NX*NY*NZ)* sizeof(float);
nblocks.x = NBLOCKX;
nblocks.y = ((1 + (NX*NY*NZ - 1)/NTHREAD_PER_BLOCK) - 1) / NBLOCKX + 1;
printf("current scale = %d, size of image = %d x %d x %d ... \n", scale, NX, NY, NZ);
if(scale<NSCALE-1)
{
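			// upsample the motion field estimated at the previous (coarser)
			// scale to initialize the current, finer scale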
upSample<<<nblocks, NTHREAD_PER_BLOCK>>>(d_mv_x[scale+1], d_mv_x[scale], NX, NY, NZ);
upSample<<<nblocks, NTHREAD_PER_BLOCK>>>(d_mv_y[scale+1], d_mv_y[scale], NX, NY, NZ);
upSample<<<nblocks, NTHREAD_PER_BLOCK>>>(d_mv_z[scale+1], d_mv_z[scale], NX, NY, NZ);
}
compute(d_im_move[scale], d_im_static[scale], d_mv_x[scale], d_mv_y[scale], d_mv_z[scale], MAX_ITER);
printf("\n\n");
}
cudaThreadSynchronize();
cutilCheckError( cutStopTimer( timer));
printf("\n\n****************************************\n");
printf( "Computing time: %f (ms)\n", cutGetTimerValue( timer));
printf("****************************************\n\n\n");
cutilCheckError( cutDeleteTimer( timer));
// mark the end timer and print
/******************************************************
finalize
******************************************************/
printf("Finalizing program...\n\n");
fina();
/**** shut down CUBLAS *******/
status = cublasShutdown();
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf (stderr, "!!!! shutdown error (A)\n");
getchar();
exit(0);
}
// Shut down CUBLAS
cudaThreadSynchronize();
// mark the end total timer
cutilCheckError( cutStopTimer( totalTimer));
printf("\n\n****************************************\n");
printf( "Entire program time: %f (ms)\n", cutGetTimerValue( totalTimer));
printf("****************************************\n\n\n");
cutilCheckError( cutDeleteTimer( totalTimer));
printf("Have a nice day!\n");
cudaThreadExit();
cutilExit(argc, argv);
return 0;
}
|
1688b9bd96acdb98efb46ab6c93c9f7d5a82cef5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "box2d2r-512-9-512_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 49
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
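      /* Each kernel launch below fuses __side0Len time steps in one pass
         (overlapped time tiling, as generated by AN5D): the interior tile of
         __side1Len x __side2Len points is padded on each side by a halo of
         __halo * __side0Len cells, so the padded width __side2LenOl -- and
         hence the one-dimensional block size -- stays at 512 threads while
         the interior shrinks as more steps are fused. */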
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
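          // (generated code) __c0Padr == 1 when the leftover steps are fewer
          // than 2 and the parity of the total step count differs from the
          // parity of the number of launches it would take; in that case one
          // full-size launch is dropped from the loop below and the remainder
          // handling that follows covers the difference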
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_9), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
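    // reference host implementation: a 5x5 weighted stencil (radius
    // BENCH_RAD = 2) applied for `timestep` iterations on the double-buffered
    // array A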
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.03125f * A[t%2][i-2][j-2] +
0.03126f * A[t%2][i-2][j-1] +
0.03127f * A[t%2][i-2][j] +
0.03128f * A[t%2][i-2][j+1] +
0.03129f * A[t%2][i-2][j+2] +
0.03130f * A[t%2][i-1][j-2] +
0.03131f * A[t%2][i-1][j-1] +
0.03132f * A[t%2][i-1][j] +
0.03133f * A[t%2][i-1][j+1] +
0.03134f * A[t%2][i-1][j+2] +
0.03135f * A[t%2][i][j-2] +
0.03136f * A[t%2][i][j-1] +
0.24712f * A[t%2][i][j] +
0.03138f * A[t%2][i][j+1] +
0.03139f * A[t%2][i][j+2] +
0.03140f * A[t%2][i+1][j-2] +
0.03141f * A[t%2][i+1][j-1] +
0.03142f * A[t%2][i+1][j] +
0.03143f * A[t%2][i+1][j+1] +
0.03144f * A[t%2][i+1][j+2] +
0.03145f * A[t%2][i+2][j-2] +
0.03146f * A[t%2][i+2][j-1] +
0.03147f * A[t%2][i+2][j] +
0.03148f * A[t%2][i+2][j+1] +
0.03149f * A[t%2][i+2][j+2];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
1688b9bd96acdb98efb46ab6c93c9f7d5a82cef5.cu
|
#include <assert.h>
#include <stdio.h>
#include "box2d2r-512-9-512_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 49
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
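      /* Each kernel launch below fuses __side0Len time steps in one pass
         (overlapped time tiling, as generated by AN5D): the interior tile of
         __side1Len x __side2Len points is padded on each side by a halo of
         __halo * __side0Len cells, so the padded width __side2LenOl -- and
         hence the one-dimensional block size -- stays at 512 threads while
         the interior shrinks as more steps are fused. */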
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_9<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
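/* CPU fallback: apply the radius-2 (5x5) weighted stencil for 'timestep'
   iterations, alternating between the two time planes of A. */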
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.03125f * A[t%2][i-2][j-2] +
0.03126f * A[t%2][i-2][j-1] +
0.03127f * A[t%2][i-2][j] +
0.03128f * A[t%2][i-2][j+1] +
0.03129f * A[t%2][i-2][j+2] +
0.03130f * A[t%2][i-1][j-2] +
0.03131f * A[t%2][i-1][j-1] +
0.03132f * A[t%2][i-1][j] +
0.03133f * A[t%2][i-1][j+1] +
0.03134f * A[t%2][i-1][j+2] +
0.03135f * A[t%2][i][j-2] +
0.03136f * A[t%2][i][j-1] +
0.24712f * A[t%2][i][j] +
0.03138f * A[t%2][i][j+1] +
0.03139f * A[t%2][i][j+2] +
0.03140f * A[t%2][i+1][j-2] +
0.03141f * A[t%2][i+1][j-1] +
0.03142f * A[t%2][i+1][j] +
0.03143f * A[t%2][i+1][j+1] +
0.03144f * A[t%2][i+1][j+2] +
0.03145f * A[t%2][i+2][j-2] +
0.03146f * A[t%2][i+2][j-1] +
0.03147f * A[t%2][i+2][j] +
0.03148f * A[t%2][i+2][j+1] +
0.03149f * A[t%2][i+2][j+2];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
08e7cab35ba0464aae056255e72d387f5a9672d3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define REPEAT 1
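// arrayFunc: element-wise kernel; each thread computes
// d_odata[tid] = d_idata[tid] * __expf(d_jdata[tid]) for its chunk element
// (REPEAT just re-executes the same statement).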
__global__ void arrayFunc(float* d_idata, float* d_jdata, float* d_odata, int size)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < size) {
for(int i=0; i < REPEAT; i++)
d_odata[tid] = d_idata[tid] * __expf(d_jdata[tid]);
}
}
void initArrayData(float * array, float alpha, int size);
void arrayFuncCPU(const float* h_idata, const float* h_jdata, float* h_odata, int size);
void getChunkInfo(int gpuID, int i, int *d_offset, int *chunk_size, int *h_offset, int *chunk_stream, int nSize, int chunk_size_max, int num_chunk, int num_streams, int gpu_number);
#define NSIZE 2097152
#define CHUNKSIZEMAX 65536
#define NUMSTREAMS 8
#define NGPU 2
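// Work decomposition: the NSIZE-element arrays are split evenly across the GPUs;
// each GPU's share is processed in chunks of at most CHUNKSIZEMAX elements, cycled
// over NUMSTREAMS streams so host<->device copies overlap with kernel execution.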
int
main (void) {
float *h_a, *h_b, *h_c;
//-- insert CUDA code ----------------
// buffer declarations
float *d_a[NGPU], *d_b[NGPU], *d_c[NGPU];
//------------------------------------
int nsize = NSIZE;
int nThreads = 256;
int nBlocks;
hipEvent_t start, end;
float eventEtime;
int chunk_size_max = CHUNKSIZEMAX;
int num_streams = NUMSTREAMS;
int num_chunk;
int i;
int h_offset, d_offset;
int chunk_size, chunk_stream;
//-- insert CUDA code ----------------
// stream declarations
hipStream_t streams[NGPU][NUMSTREAMS];
//------------------------------------
int gpuID, gpu_number;
// allocation and initialization of host buffers
hipHostMalloc((void**)&h_a, nsize * sizeof(float));
hipHostMalloc((void**)&h_b, nsize * sizeof(float));
hipHostMalloc((void**)&h_c, nsize * sizeof(float));
initArrayData(h_a, 1.0f, nsize);
initArrayData(h_b, 10.0f, nsize);
hipGetDeviceCount( &gpu_number );
// chunk number calculation
num_chunk = (nsize/gpu_number-1) / chunk_size_max + 1;
printf("Number of gpu: %d\n", gpu_number);
printf("Number of elements: %d\n", nsize);
printf("Number of streams: %d\n", num_streams);
printf("Number of chunks per GPU: %d\n", num_chunk);
for (gpuID=0; gpuID<gpu_number; gpuID++) {
//-- insert CUDA code ----------------
// GPU selection
hipSetDevice(gpuID);
// allocation of device buffers
hipMalloc((void**)&d_a[gpuID], num_streams * chunk_size_max * sizeof(float));
hipMalloc((void**)&d_b[gpuID], num_streams * chunk_size_max * sizeof(float));
hipMalloc((void**)&d_c[gpuID], num_streams * chunk_size_max * sizeof(float));
// streams creation
for (i = 0; i< num_streams; i++)
hipStreamCreate(&streams[gpuID][i]);
//------------------------------------
}
// creation of cuda events: start, end
hipSetDevice(0);
hipEventCreate(&start);
hipEventCreate(&end);
printf ("\nGPU computation ... ");
hipEventRecord(start,0);
for (gpuID = 0; gpuID < gpu_number; gpuID++) {
hipSetDevice(gpuID);
for (i = 0; i < num_chunk; i++) {
// please see getChunkInfo function description
getChunkInfo(gpuID, i, &d_offset, &chunk_size, &h_offset, &chunk_stream, nsize, chunk_size_max, num_chunk, num_streams, gpu_number);
//-- insert CUDA code ----------------
// host to device buffer copies
hipMemcpyAsync(d_a[gpuID]+d_offset, h_a+h_offset, chunk_size*sizeof(float), hipMemcpyHostToDevice, streams[gpuID][chunk_stream]);
hipMemcpyAsync(d_b[gpuID]+d_offset, h_b+h_offset, chunk_size*sizeof(float), hipMemcpyHostToDevice, streams[gpuID][chunk_stream]);
//------------------------------------
// block number calculation
nBlocks = (chunk_size-1) / nThreads + 1;
//-- insert CUDA code ----------------
// arrayFunc kernel launch
hipLaunchKernelGGL(( arrayFunc), dim3(nBlocks),dim3(nThreads), 0, streams[gpuID][chunk_stream], d_a[gpuID]+d_offset, d_b[gpuID]+d_offset, d_c[gpuID]+d_offset, chunk_size);
//------------------------------------
//-- insert CUDA code ----------------
// copy back of results from device
hipMemcpyAsync(h_c+h_offset, d_c[gpuID]+d_offset, chunk_size*sizeof(float), hipMemcpyDeviceToHost, streams[gpuID][chunk_stream]);
//------------------------------------
}
}
for (gpuID = 0; gpuID < gpu_number; gpuID++) {
hipSetDevice(gpuID);
hipDeviceSynchronize();
}
hipSetDevice(0);
hipEventRecord(end,0);
hipEventSynchronize(end);
hipEventElapsedTime(&eventEtime, start, end);
printf ("ok\n");
printf("Elapsed time on GPU: %.2f ms\n", eventEtime);
// host computation
printf("\nCPU computation ... ");
float *cpuResult;
float eventTimeCPU;
hipHostMalloc((void**)&cpuResult, nsize * sizeof(float));
hipEventRecord(start,0);
arrayFuncCPU(h_a, h_b, cpuResult, nsize);
hipEventRecord(end,0);
hipEventSynchronize(end);
hipEventElapsedTime(&eventTimeCPU, start, end);
printf ("ok\n");
printf("Elapsed time on CPU: %.2f ms\n", eventTimeCPU);
printf("\nSpeed UP CPU/GPU %.1fx\n", eventTimeCPU/eventEtime);
printf("\nCheck results:\n");
printf ("h_c[0] = %f\n", h_c[0]);
printf ("cpuResult[0] = %f\n", cpuResult[0]);
// free resources on device
for (gpuID = 0; gpuID < gpu_number; gpuID++) {
hipSetDevice(gpuID);
for (i = 0; i< num_streams; i++)
hipStreamDestroy(streams[gpuID][i]);
}
hipSetDevice(0);
hipEventDestroy(start);
hipEventDestroy(end);
// free resources on host
hipHostFree(h_a);
hipHostFree(h_b);
hipHostFree(h_c);
return 0;
}
void
initArrayData(float * array, float alpha, int size)
{
int i;
for (i=0; i< size; i++)
array[i] = alpha * (float) rand() / (float) RAND_MAX;
}
// getChunkInfo computes, for the i-th chunk assigned to a GPU, the bookkeeping
// needed by the copy and launch code, given the total number of chunks,
// the maximum chunk size and the size of the array to process
// getChunkInfo returns:
// * chunk_size: the number of elements to use in current chunk
// * chunk_stream: the stream to use to process i-th chunk
// * the X_offsets to use for accessing the correct elements of host
// and device arrays in data movements and kernel launch
//
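// Worked example with the defaults above (NSIZE=2097152, CHUNKSIZEMAX=65536,
// NUMSTREAMS=8, two GPUs): for gpuID=1 and i=3, h_offset = 3*65536 + 1048576 = 1245184,
// chunk_stream = 3 % 8 = 3, d_offset = 3*65536 = 196608, and chunk_size stays 65536
// because 2097152 divides evenly by the chunk size.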
void getChunkInfo(int gpuID, int i, int *d_offset, int *chunk_size, int *h_offset, int *chunk_stream, int nSize, int chunk_size_max, int num_chunk, int num_streams, int gpu_number){
int Reminder = nSize%chunk_size_max;
*h_offset = i*chunk_size_max + gpuID * (nSize/gpu_number);
*chunk_stream = i%num_streams;
*chunk_size = chunk_size_max;
*d_offset = *chunk_stream * chunk_size_max;
if (Reminder && (i == num_chunk-1)) *chunk_size = Reminder;
}
void arrayFuncCPU(const float* h_idata, const float* h_jdata, float* h_odata, int size)
{
int i, j;
for (i=0; i<size; i++)
for(j=0; j<REPEAT; j++)
h_odata[i] = h_idata[i] * expf(h_jdata[i]);
}
|
08e7cab35ba0464aae056255e72d387f5a9672d3.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define REPEAT 1
__global__ void arrayFunc(float* d_idata, float* d_jdata, float* d_odata, int size)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < size) {
for(int i=0; i < REPEAT; i++)
d_odata[tid] = d_idata[tid] * __expf(d_jdata[tid]);
}
}
void initArrayData(float * array, float alpha, int size);
void arrayFuncCPU(const float* h_idata, const float* h_jdata, float* h_odata, int size);
void getChunkInfo(int gpuID, int i, int *d_offset, int *chunk_size, int *h_offset, int *chunk_stream, int nSize, int chunk_size_max, int num_chunk, int num_streams, int gpu_number);
#define NSIZE 2097152
#define CHUNKSIZEMAX 65536
#define NUMSTREAMS 8
#define NGPU 2
int
main (void) {
float *h_a, *h_b, *h_c;
//-- insert CUDA code ----------------
// buffer declarations
float *d_a[NGPU], *d_b[NGPU], *d_c[NGPU];
//------------------------------------
int nsize = NSIZE;
int nThreads = 256;
int nBlocks;
cudaEvent_t start, end;
float eventEtime;
int chunk_size_max = CHUNKSIZEMAX;
int num_streams = NUMSTREAMS;
int num_chunk;
int i;
int h_offset, d_offset;
int chunk_size, chunk_stream;
//-- insert CUDA code ----------------
// stream declarations
cudaStream_t streams[NGPU][NUMSTREAMS];
//------------------------------------
int gpuID, gpu_number;
// allocation and initialization of host buffers
cudaMallocHost((void**)&h_a, nsize * sizeof(float));
cudaMallocHost((void**)&h_b, nsize * sizeof(float));
cudaMallocHost((void**)&h_c, nsize * sizeof(float));
initArrayData(h_a, 1.0f, nsize);
initArrayData(h_b, 10.0f, nsize);
cudaGetDeviceCount( &gpu_number );
// chunk number calculation
num_chunk = (nsize/gpu_number-1) / chunk_size_max + 1;
printf("Number of gpu: %d\n", gpu_number);
printf("Number of elements: %d\n", nsize);
printf("Number of streams: %d\n", num_streams);
printf("Number of chunks per GPU: %d\n", num_chunk);
for (gpuID=0; gpuID<gpu_number; gpuID++) {
//-- insert CUDA code ----------------
// GPU selection
cudaSetDevice(gpuID);
// allocation of device buffers
cudaMalloc((void**)&d_a[gpuID], num_streams * chunk_size_max * sizeof(float));
cudaMalloc((void**)&d_b[gpuID], num_streams * chunk_size_max * sizeof(float));
cudaMalloc((void**)&d_c[gpuID], num_streams * chunk_size_max * sizeof(float));
// streams creation
for (i = 0; i< num_streams; i++)
cudaStreamCreate(&streams[gpuID][i]);
//------------------------------------
}
// creation of cuda events: start, end
cudaSetDevice(0);
cudaEventCreate(&start);
cudaEventCreate(&end);
printf ("\nGPU computation ... ");
cudaEventRecord(start,0);
for (gpuID = 0; gpuID < gpu_number; gpuID++) {
cudaSetDevice(gpuID);
for (i = 0; i < num_chunk; i++) {
// please see getChunkInfo function description
getChunkInfo(gpuID, i, &d_offset, &chunk_size, &h_offset, &chunk_stream, nsize, chunk_size_max, num_chunk, num_streams, gpu_number);
//-- insert CUDA code ----------------
// host to device buffer copies
cudaMemcpyAsync(d_a[gpuID]+d_offset, h_a+h_offset, chunk_size*sizeof(float), cudaMemcpyHostToDevice, streams[gpuID][chunk_stream]);
cudaMemcpyAsync(d_b[gpuID]+d_offset, h_b+h_offset, chunk_size*sizeof(float), cudaMemcpyHostToDevice, streams[gpuID][chunk_stream]);
//------------------------------------
// block number calculation
nBlocks = (chunk_size-1) / nThreads + 1;
//-- insert CUDA code ----------------
// arrayFunc kernel launch
arrayFunc<<<nBlocks,nThreads, 0, streams[gpuID][chunk_stream]>>>(d_a[gpuID]+d_offset, d_b[gpuID]+d_offset, d_c[gpuID]+d_offset, chunk_size);
//------------------------------------
//-- insert CUDA code ----------------
// copy back of results from device
cudaMemcpyAsync(h_c+h_offset, d_c[gpuID]+d_offset, chunk_size*sizeof(float), cudaMemcpyDeviceToHost, streams[gpuID][chunk_stream]);
//------------------------------------
}
}
for (gpuID = 0; gpuID < gpu_number; gpuID++) {
cudaSetDevice(gpuID);
cudaDeviceSynchronize();
}
cudaSetDevice(0);
cudaEventRecord(end,0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&eventEtime, start, end);
printf ("ok\n");
printf("Elapsed time on GPU: %.2f ms\n", eventEtime);
// host computation
printf("\nCPU computation ... ");
float *cpuResult;
float eventTimeCPU;
cudaMallocHost((void**)&cpuResult, nsize * sizeof(float));
cudaEventRecord(start,0);
arrayFuncCPU(h_a, h_b, cpuResult, nsize);
cudaEventRecord(end,0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&eventTimeCPU, start, end);
printf ("ok\n");
printf("Elapsed time on CPU: %.2f ms\n", eventTimeCPU);
printf("\nSpeed UP CPU/GPU %.1fx\n", eventTimeCPU/eventEtime);
printf("\nCheck results:\n");
printf ("h_c[0] = %f\n", h_c[0]);
printf ("cpuResult[0] = %f\n", cpuResult[0]);
// free resources on device
for (gpuID = 0; gpuID < gpu_number; gpuID++) {
cudaSetDevice(gpuID);
for (i = 0; i< num_streams; i++)
cudaStreamDestroy(streams[gpuID][i]);
}
cudaSetDevice(0);
cudaEventDestroy(start);
cudaEventDestroy(end);
// free resources on host
cudaFreeHost(h_a);
cudaFreeHost(h_b);
cudaFreeHost(h_c);
return 0;
}
void
initArrayData(float * array, float alpha, int size)
{
int i;
for (i=0; i< size; i++)
array[i] = alpha * (float) rand() / (float) RAND_MAX;
}
// getChunkInfo computes, for the i-th chunk assigned to a GPU, the bookkeeping
// needed by the copy and launch code, given the total number of chunks,
// the maximum chunk size and the size of the array to process
// getChunkInfo returns:
// * chunk_size: the number of elements to use in current chunk
// * chunk_stream: the stream to use to process i-th chunk
// * the X_offsets to use for accessing the correct elements of host
// and device arrays in data movements and kernel launch
//
void getChunkInfo(int gpuID, int i, int *d_offset, int *chunk_size, int *h_offset, int *chunk_stream, int nSize, int chunk_size_max, int num_chunk, int num_streams, int gpu_number){
int Reminder = nSize%chunk_size_max;
*h_offset = i*chunk_size_max + gpuID * (nSize/gpu_number);
*chunk_stream = i%num_streams;
*chunk_size = chunk_size_max;
*d_offset = *chunk_stream * chunk_size_max;
if (Reminder && (i == num_chunk-1)) *chunk_size = Reminder;
}
void arrayFuncCPU(const float* h_idata, const float* h_jdata, float* h_odata, int size)
{
int i, j;
for (i=0; i<size; i++)
for(j=0; j<REPEAT; j++)
h_odata[i] = h_idata[i] * expf(h_jdata[i]);
}
|
b685fe78dd0b07a3e63c9615bd172a9248b7bbde.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <limits.h>
#include <ctype.h>
#include <float.h>
#include <algorithm>
#include <math.h>
#include "rocblas.h"
#include "mex.h"
#include "hip/hip_runtime.h"
#include "cuSVMutil.h"
#include <vector>
__constant__ float C;
__constant__ float taumin;
__constant__ float kernelwidth;
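// Device constants: C is the SMO box constraint, taumin is a lower bound on the
// second-order denominator 2-2*K(i,j), and kernelwidth holds the negated RBF gamma
// (the host negates _kernelwidth before copying it to the symbol).
// FindBJ: per-block reduction that, given the current best first index (BIValue),
// scores every feasible second index j by (BIValue + y_j*F_j)^2 / max(2-2*K(i,j), taumin)
// (feasible meaning alpha_j>0 for y_j=+1, alpha_j<C for y_j=-1) and writes each
// block's best value and index to g_odata/g_index.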
template <unsigned int blockSize>
__global__ void FindBJ(float *d_F, float* d_y,float* d_alpha,float* d_KernelCol,float *g_odata,int* g_index,float BIValue, unsigned int n)
{
__shared__ float sdata[blockSize];
__shared__ int ind[blockSize];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + tid;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid]=-FLT_MAX;
ind[tid]=0;
float temp;
float globaltemp;
float LocalCloseY;
float LocalFarY;
float maxtemp;
float denomclose;
float denomfar=1.f;
while (i < n)
{
LocalCloseY=d_y[i];
LocalFarY=(i+blockSize)<n ? d_y[i+blockSize]:0.f;
denomclose=(2.f-2.f*d_KernelCol[i]);
if(i+blockSize<n){denomfar=(2.f-2.f*d_KernelCol[i+blockSize]);}
denomclose=denomclose<taumin?taumin:denomclose;
denomfar=denomfar<taumin?taumin:denomfar;
maxtemp=
fmaxf(
globaltemp=
(LocalCloseY*d_alpha[i])>(LocalCloseY==1?0:-C) ?
__fdividef(__powf(BIValue+LocalCloseY*d_F[i],2.f),denomclose)
:-FLT_MAX,
i+blockSize<n ?
((LocalFarY*d_alpha[i+blockSize])>(LocalFarY==1?0:-C)?
__fdividef(__powf(BIValue+LocalFarY*d_F[i+blockSize],2.f),denomfar)
:-FLT_MAX)
:-FLT_MAX);
sdata[tid]=fmaxf(temp=sdata[tid],maxtemp);
if (sdata[tid]!=temp)
{
sdata[tid]== globaltemp ? ind[tid]=i : ind[tid]=i+blockSize;
}
i += gridSize;
}
__syncthreads();
if (tid < 128){ if (sdata[tid] < sdata[tid + 128]){ ind[tid]=ind[tid+128];sdata[tid]=sdata[tid+128]; }} __syncthreads();
if (tid < 64){ if (sdata[tid] < sdata[tid + 64]){ ind[tid]=ind[tid+64];sdata[tid]=sdata[tid+64]; }} __syncthreads();
if (tid < 32)
{
if (sdata[tid] <sdata[tid + 32]) {ind[tid]=ind[tid+32];sdata[tid]=sdata[tid+32];} __syncthreads();
if (sdata[tid] <sdata[tid + 16]) {ind[tid]=ind[tid+16];sdata[tid]=sdata[tid+16];} __syncthreads();
if (sdata[tid] <sdata[tid + 8]) {ind[tid]=ind[tid+8];sdata[tid]=sdata[tid+8];} __syncthreads();
if (sdata[tid] <sdata[tid + 4]) {ind[tid]=ind[tid+4];sdata[tid]=sdata[tid+4];} __syncthreads();
if (sdata[tid] <sdata[tid + 2]) {ind[tid]=ind[tid+2];sdata[tid]=sdata[tid+2];} __syncthreads();
if (sdata[tid] <sdata[tid + 1]) {ind[tid]=ind[tid+1];sdata[tid]=sdata[tid+1];} __syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
if (tid == 0) g_index[blockIdx.x] = ind[0];
}
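// FindBI: per-block reduction that maximises -y_i*F_i over indices whose alpha can
// still grow (alpha_i<C for y_i=+1, alpha_i>0 for y_i=-1); per-block winners go to
// g_odata/g_index and are combined on the host by CpuMaxInd.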
template <unsigned int blockSize>
__global__ void FindBI(float *d_F, float* d_y,float* d_alpha,float *g_odata,int* g_index,unsigned int n)
{
__shared__ float sdata[blockSize];
__shared__ int ind[blockSize];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + tid;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid]=-FLT_MAX;
ind[tid]=0;
float temp;
float globaltemp;
float LocalCloseY;
float LocalFarY;
float maxtemp;
while (i < n)
{
LocalCloseY=d_y[i];
LocalFarY=(i+blockSize)<n ? d_y[i+blockSize]:0;
maxtemp=
fmaxf(
globaltemp=
(LocalCloseY*d_alpha[i])<(LocalCloseY==1?C:0) ?
-(d_F[i]*LocalCloseY)
:-FLT_MAX,
i+blockSize<n ?
((LocalFarY*d_alpha[i+blockSize])<(LocalFarY==1?C:0) ?
-(d_F[i+blockSize]*LocalFarY)
:-FLT_MAX)
:-FLT_MAX);
sdata[tid]=fmaxf(temp=sdata[tid],maxtemp);
if (sdata[tid]!=temp)
{
sdata[tid]== globaltemp ? ind[tid]=i : ind[tid]=i+blockSize;
}
i += gridSize;
}
__syncthreads();
if (tid < 128){ if (sdata[tid] < sdata[tid + 128]){ ind[tid]=ind[tid+128];sdata[tid]=sdata[tid+128]; }} __syncthreads();
if (tid < 64){ if (sdata[tid] < sdata[tid + 64]){ ind[tid]=ind[tid+64];sdata[tid]=sdata[tid+64]; }} __syncthreads();
if (tid < 32)
{
if (sdata[tid] <sdata[tid + 32]) {ind[tid]=ind[tid+32];sdata[tid]=sdata[tid+32];} __syncthreads();
if (sdata[tid] <sdata[tid + 16]) {ind[tid]=ind[tid+16];sdata[tid]=sdata[tid+16];} __syncthreads();
if (sdata[tid] <sdata[tid + 8]) {ind[tid]=ind[tid+8];sdata[tid]=sdata[tid+8];} __syncthreads();
if (sdata[tid] <sdata[tid + 4]) {ind[tid]=ind[tid+4];sdata[tid]=sdata[tid+4];} __syncthreads();
if (sdata[tid] <sdata[tid + 2]) {ind[tid]=ind[tid+2];sdata[tid]=sdata[tid+2];} __syncthreads();
if (sdata[tid] <sdata[tid + 1]) {ind[tid]=ind[tid+1];sdata[tid]=sdata[tid+1];} __syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
if (tid == 0) g_index[blockIdx.x] = ind[0];
}
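// FindStoppingJ: per-block minimum of -y_i*F_i over indices with alpha_i>0 for
// y_i=+1 or alpha_i<C for y_i=-1; the host compares this minimum (SJValue) against
// BIValue to evaluate the duality-gap stopping criterion.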
template <unsigned int blockSize>
__global__ void FindStoppingJ(float *d_F, float* d_y,float* d_alpha,float *g_odata,unsigned int n)
{
__shared__ float sdata[blockSize];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + tid;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid]=FLT_MAX;
float LocalCloseY;
float LocalFarY;
while (i < n)
{
LocalCloseY=d_y[i];
LocalFarY=(i+blockSize)<n ? d_y[i+blockSize]:0;
sdata[tid]=
fminf(
sdata[tid],
fminf(
(LocalCloseY*d_alpha[i])>(LocalCloseY==1?0:-C) ?
-(d_F[i]*LocalCloseY)
:FLT_MAX,
i+blockSize<n ?
((LocalFarY*d_alpha[i+blockSize])>(LocalFarY==1?0:-C)?
-(d_F[i+blockSize]*LocalFarY)
:FLT_MAX)
:FLT_MAX));
i += gridSize;
}
__syncthreads();
if (tid < 128){ sdata[tid]=fminf(sdata[tid],sdata[tid+128]);} __syncthreads();
if (tid < 64){ sdata[tid]=fminf(sdata[tid],sdata[tid+64]);} __syncthreads();
if (tid < 32) {
sdata[tid]=fminf(sdata[tid],sdata[tid+32]); __syncthreads();
sdata[tid]=fminf(sdata[tid],sdata[tid+16]); __syncthreads();
sdata[tid]=fminf(sdata[tid],sdata[tid+8]); __syncthreads();
sdata[tid]=fminf(sdata[tid],sdata[tid+4]); __syncthreads();
sdata[tid]=fminf(sdata[tid],sdata[tid+2]); __syncthreads();
sdata[tid]=fminf(sdata[tid],sdata[tid+1]); __syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
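// UpdateF: rank-2 gradient update after alpha_i and alpha_j change by
// deltaalphai/deltaalphaj: F[k] += y_i*y_k*deltaalphai*K(i,k) + y_j*y_k*deltaalphaj*K(j,k).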
__global__ void UpdateF(float * F,float *KernelColI,float* KernelColJ, float* d_y,float deltaalphai,float deltaalphaj,float yi,float yj,int n)
{
int totalThreads,ctaStart,tid;
totalThreads = gridDim.x*blockDim.x;
ctaStart = blockDim.x*blockIdx.x;
tid = threadIdx.x;
int i;
for (i = ctaStart + tid; i < n; i += totalThreads)
{
F[i] = F[i] + yi*d_y[i]*deltaalphai*KernelColI[i]+yj*d_y[i]*deltaalphaj*KernelColJ[i];
}
}
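// RBFFinish: turns the cached dot products into RBF kernel values
// expf(kernelwidth*(||x_k||^2 + ||x_row||^2 - 2*x_k.x_row)); kernelwidth already holds -gamma.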
__global__ void RBFFinish(float *KernelCol, const float * KernelDotProd,const float* DotProd,const float* DotProdRow,const int n)
{
int totalThreads,ctaStart,tid;
totalThreads = gridDim.x*blockDim.x;
ctaStart = blockDim.x*blockIdx.x;
tid = threadIdx.x;
int i;
float temp;
for (i = ctaStart + tid; i < n; i += totalThreads)
{
KernelCol[i] = expf(kernelwidth*(DotProd[i]+*DotProdRow-KernelDotProd[i]*2.f));
}
}
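// RBFKernel: computes one full kernel-matrix column for the sample held in
// d_Kernel_InterRow via an SGEMV (all x_k . x_row dot products) followed by RBFFinish.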
void RBFKernel(float *d_KernelJ,const int BJIndex,const float *d_x,const float * d_Kernel_InterRow,float *d_KernelDotProd, float *d_SelfDotProd,const int& m,const int& n,const int &nbrCtas,const int& threadsPerCta)
{
hipblasSgemv ('n', m, n, 1,d_x, m, d_Kernel_InterRow, 1, 0, d_KernelDotProd, 1);
hipLaunchKernelGGL(( RBFFinish), dim3(nbrCtas),dim3(threadsPerCta), 0, 0, d_KernelJ, d_KernelDotProd,d_SelfDotProd,d_SelfDotProd+BJIndex,m);
}
void CpuMaxInd(float &BIValue, int &BIIndex,const float * value_inter,const int * index_inter,const int n)
{
BIValue=value_inter[0];
BIIndex=index_inter[0];
for(int j=0;j<n;j++)
{
if (value_inter[j]>BIValue)
{
BIValue=value_inter[j];
BIIndex=index_inter[j];
}
}
}
void CpuMaxIndSvr(float &BIValue, int &BIIndex, const float * value_inter,const int * index_inter,int n,const int m)
{
BIValue=value_inter[0];
BIIndex=index_inter[0];
for(int j=0;j<n;j++)
{
if (value_inter[j]>BIValue)
{
BIValue=value_inter[j];
BIIndex=j<n/2?index_inter[j]:index_inter[j]+m;
}
}
}
void CpuMin(float &SJValue, float * value_inter,int n)
{
SJValue=value_inter[0];
for(int j=0;j<n;j++)
{
if (value_inter[j]<SJValue)
{
SJValue=value_inter[j];
}
}
}
void DotProdVector(float * x, float* dotprod,int m, int n)
{
for(int i=0;i<m;i++)
{
dotprod[i]=0;
for(int j=0;j<n;j++)
dotprod[i]+=(x[i+j*m])*(x[i+j*m]);
}
}
void IncrementKernelCache(std::vector<int>& KernelCacheItersSinceUsed,const int &RowsInKernelCache)
{
for(int k=0;k<RowsInKernelCache;k++)
{
KernelCacheItersSinceUsed[k]+=1;
}
}
inline void UpdateAlphas(float& alphai,float& alphaj,const float& Kij,const float& yi,const float& yj,const float& Fi,const float& Fj,const float& C,const float& h_taumin)
{
//This alpha update code is adapted from that in LIBSVM.
//Chih-Chung Chang and Chih-Jen Lin, LIBSVM : a library for support vector machines, 2001. Software available at http://www.csie.ntu.edu.tw/~cjlin/libsvm
float lambda;
float lambda_denom;
lambda_denom=2.0-2.0*Kij;
if (lambda_denom<h_taumin) {lambda_denom=h_taumin;}
if (yi!=yj)
{
lambda=(-Fi-Fj)/lambda_denom;
float alphadiff=alphai-alphaj;
alphai+=lambda;
alphaj+=lambda;
if(alphadiff > 0)
{
if(alphaj < 0)
{
alphaj = 0;
alphai = alphadiff;
}
}
else
{
if(alphai < 0)
{
alphai = 0;
alphaj = -alphadiff;
}
}
if(alphadiff > 0)
{
if(alphai > C)
{
alphai = C;
alphaj = C - alphadiff;
}
}
else
{
if(alphaj > C)
{
alphaj = C;
alphai = C + alphadiff;
}
}
}
else
{
float alphasum=alphai+alphaj;
lambda=(Fi-Fj)/lambda_denom;
alphai-=lambda;
alphaj+=lambda;
if(alphasum > C)
{
if(alphai > C)
{
alphai = C;
alphaj = alphasum - C;
}
if(alphaj > C)
{
alphaj = C;
alphai = alphasum - C;
}
}
else
{
if(alphaj < 0)
{
alphaj = 0;
alphai = alphasum;
}
if(alphai < 0)
{
alphai = 0;
alphaj = alphasum;
}
}
}
}
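// SVRTrain: SMO solver for epsilon-SVR. The regression problem is rewritten as a
// 2m-variable problem: samples are duplicated with binary labels +1/-1 and the
// gradient is initialised to F[j]=-y[j]+eps and F[j+m]=y[j]+eps, after which the same
// working-set selection (FindBI/FindBJ), alpha update and kernel-cache machinery used
// by SVMTrain below drives the iterations.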
extern "C"
void SVRTrain(float *mexalpha,float* beta,float*y,float *x ,float _C, float _kernelwidth, float eps, int m, int n, float StoppingCrit)
{
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
mxArray *mexelapsed =mxCreateNumericMatrix(1, 1,mxSINGLE_CLASS, mxREAL);
float * elapsed=(float *)mxGetData(mexelapsed);
hipEventRecord(start,0);
hipblasInit();
int numBlocks=64;
dim3 ReduceGrid(numBlocks, 1, 1);
dim3 ReduceBlock(256, 1, 1);
float h_taumin=0.0001;
mxCUDA_SAFE_CALL(hipMemcpyToSymbol(taumin, &h_taumin, sizeof(float)));
_kernelwidth*=-1;
mxCUDA_SAFE_CALL(hipMemcpyToSymbol(kernelwidth, &_kernelwidth, sizeof(float)));
mxCUDA_SAFE_CALL(hipMemcpyToSymbol(C, &_C, sizeof(float)));
float *alphasvr=new float [2*m];
float *ybinary=new float [2*m];
float *F=new float [2*m];
for(int j=0;j<m;j++)
{
alphasvr[j]=0;
ybinary[j]=1;
F[j]=-y[j]+eps;
alphasvr[j+m]=0;
ybinary[j+m]=-1;
F[j+m]=y[j]+eps;
}
float *SelfDotProd=new float [m];
DotProdVector(x, SelfDotProd,m, n);
int nbrCtas;
int elemsPerCta;
int threadsPerCta;
VectorSplay (m, SAXPY_THREAD_MIN, SAXPY_THREAD_MAX, SAXPY_CTAS_MAX, &nbrCtas, &elemsPerCta,&threadsPerCta);
float * d_x;
float * d_xT;
float * d_alpha;
float* d_y;
float* d_F;
float *d_KernelDotProd;
float *d_SelfDotProd;
float *d_KernelJ;
float *d_KernelI;
float* d_KernelInterRow;
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_x, m*n*sizeof(float)));
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_xT, m*n*sizeof(float)));
mxCUDA_SAFE_CALL(hipMemcpy(d_x, x, sizeof(float)*n*m,hipMemcpyHostToDevice));
dim3 gridtranspose(ceil((float)m / TRANS_BLOCK_DIM), ceil((float)n / TRANS_BLOCK_DIM), 1);
dim3 threadstranspose(TRANS_BLOCK_DIM, TRANS_BLOCK_DIM, 1);
hipDeviceSynchronize();
hipLaunchKernelGGL(( transpose), dim3(gridtranspose), dim3(threadstranspose) , 0, 0, d_xT, d_x, m, n);
float *xT=new float [n*m];
mxCUDA_SAFE_CALL(hipMemcpy(xT, d_xT, sizeof(float)*m*n,hipMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(hipFree(d_xT));
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_KernelInterRow, n*sizeof(float)));
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_alpha, 2*m*sizeof(float)));
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_y, 2*m*sizeof(float)));
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_F, 2*m*sizeof(float)));
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_SelfDotProd, m*sizeof(float)));
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_KernelDotProd, m*sizeof(float)));
mxCUDA_SAFE_CALL(hipMemcpy(d_y, ybinary, sizeof(float)*m*2,hipMemcpyHostToDevice));
mxCUDA_SAFE_CALL(hipMemcpy(d_alpha, alphasvr, sizeof(float)*m*2,hipMemcpyHostToDevice));
mxCUDA_SAFE_CALL(hipMemcpy(d_F, F, sizeof(float)*m*2,hipMemcpyHostToDevice));
mxCUDA_SAFE_CALL(hipMemcpy(d_SelfDotProd, SelfDotProd, sizeof(float)*m,hipMemcpyHostToDevice));
delete [] F;
delete [] SelfDotProd;
float* value_inter;
int* index_inter;
float* value_inter_svr;
int* index_inter_svr;
hipHostMalloc( (void**)&value_inter, numBlocks*sizeof(float) );
hipHostMalloc( (void**)&index_inter, numBlocks*sizeof(int) );
hipHostMalloc( (void**)&value_inter_svr, 2*numBlocks*sizeof(float) );
hipHostMalloc( (void**)&index_inter_svr, 2*numBlocks*sizeof(int) );
float* d_value_inter;
int* d_index_inter;
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_value_inter, numBlocks*sizeof(float)));
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_index_inter, numBlocks*sizeof(int)));
size_t free_mem, total;
hipMemGetInfo(&free_mem, &total);
int KernelCacheSize=free_mem-MBtoLeave*1024*1024;
int RowsInKernelCache=KernelCacheSize/(sizeof(float)*m);
float *d_Kernel_Cache;
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_Kernel_Cache, KernelCacheSize));
std::vector<int> KernelCacheIndices(RowsInKernelCache,-1);
std::vector<int> KernelCacheItersSinceUsed(RowsInKernelCache,0);
std::vector<int>::iterator CachePosI;
std::vector<int>::iterator CachePosJ;
int CacheDiffI;
int CacheDiffJ;
int CheckStoppingCritEvery=255;
int iter=0;
float BIValue;
int BIIndex;
float SJValue;
float BJSecondOrderValue;
int BJIndex;
float Kij;
float yj;
float yi;
float alphai;
float alphaj;
float oldalphai;
float oldalphaj;
float Fi;
float Fj;
while (1)
{
hipLaunchKernelGGL(( FindBI<256>), dim3(ReduceGrid), dim3(ReduceBlock), 0, 0, d_F, d_y,d_alpha,d_value_inter,d_index_inter, 2*m);
mxCUDA_SAFE_CALL(hipMemcpy(value_inter, d_value_inter, sizeof(float)*numBlocks,hipMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(hipMemcpy(index_inter, d_index_inter, sizeof(int)*numBlocks,hipMemcpyDeviceToHost));
hipDeviceSynchronize();
CpuMaxInd(BIValue,BIIndex,value_inter,index_inter,numBlocks);
if ((iter & CheckStoppingCritEvery)==0)
{
hipLaunchKernelGGL(( FindStoppingJ<256>), dim3(ReduceGrid), dim3(ReduceBlock), 0, 0, d_F, d_y,d_alpha,d_value_inter, 2*m);
mxCUDA_SAFE_CALL(hipMemcpy(value_inter, d_value_inter, sizeof(float)*numBlocks,hipMemcpyDeviceToHost));
hipDeviceSynchronize();
CpuMin(SJValue,value_inter,numBlocks);
if(BIValue-SJValue<StoppingCrit) {*beta=(SJValue+BIValue)/2; break;}
}
CachePosI=find(KernelCacheIndices.begin(),KernelCacheIndices.end(),(BIIndex>=m?BIIndex-m:BIIndex));
if (CachePosI ==KernelCacheIndices.end())
{
CacheDiffI=max_element(KernelCacheItersSinceUsed.begin(),KernelCacheItersSinceUsed.end())-KernelCacheItersSinceUsed.begin();
d_KernelI=d_Kernel_Cache+CacheDiffI*m;
mxCUDA_SAFE_CALL(hipMemcpy(d_KernelInterRow, xT+(BIIndex>=m?BIIndex-m:BIIndex)*n, n*sizeof(float),hipMemcpyHostToDevice));
RBFKernel(d_KernelI,(BIIndex>=m?BIIndex-m:BIIndex),d_x,d_KernelInterRow,d_KernelDotProd,d_SelfDotProd,m,n,nbrCtas,threadsPerCta);
*(KernelCacheIndices.begin()+CacheDiffI)=(BIIndex>=m?BIIndex-m:BIIndex);
}
else
{
CacheDiffI=CachePosI-KernelCacheIndices.begin();
d_KernelI=d_Kernel_Cache+m*CacheDiffI;
}
*(KernelCacheItersSinceUsed.begin()+CacheDiffI)=-1;
hipLaunchKernelGGL(( FindBJ<256>), dim3(ReduceGrid), dim3(ReduceBlock), 0, 0, d_F, d_y,d_alpha,d_KernelI,d_value_inter,d_index_inter,BIValue, m);
mxCUDA_SAFE_CALL(hipMemcpy(value_inter_svr, d_value_inter, sizeof(float)*numBlocks,hipMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(hipMemcpy(index_inter_svr, d_index_inter, sizeof(int)*numBlocks,hipMemcpyDeviceToHost));
hipLaunchKernelGGL(( FindBJ<256>), dim3(ReduceGrid), dim3(ReduceBlock), 0, 0, d_F+m, d_y+m,d_alpha+m,d_KernelI,d_value_inter,d_index_inter,BIValue,m);
mxCUDA_SAFE_CALL(hipMemcpy(value_inter_svr+numBlocks, d_value_inter, sizeof(float)*numBlocks,hipMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(hipMemcpy(index_inter_svr+numBlocks, d_index_inter, sizeof(int)*numBlocks,hipMemcpyDeviceToHost));
hipDeviceSynchronize();
CpuMaxIndSvr(BJSecondOrderValue,BJIndex,value_inter_svr,index_inter_svr,2*numBlocks,m);
mxCUDA_SAFE_CALL(hipMemcpy(&Kij, d_KernelI+(BJIndex>=m?BJIndex-m:BJIndex), sizeof(float),hipMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(hipMemcpy(&alphai, d_alpha+BIIndex, sizeof(float),hipMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(hipMemcpy(&alphaj, d_alpha+BJIndex, sizeof(float),hipMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(hipMemcpy(&yi, d_y+BIIndex, sizeof(float),hipMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(hipMemcpy(&yj, d_y+BJIndex, sizeof(float),hipMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(hipMemcpy(&Fi, d_F+BIIndex, sizeof(float),hipMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(hipMemcpy(&Fj, d_F+BJIndex, sizeof(float),hipMemcpyDeviceToHost));
oldalphai=alphai;
oldalphaj=alphaj;
UpdateAlphas(alphai,alphaj,Kij,yi,yj,Fi,Fj,_C,h_taumin);
mxCUDA_SAFE_CALL(hipMemcpy(d_alpha+BIIndex, &alphai, sizeof(float),hipMemcpyHostToDevice));
mxCUDA_SAFE_CALL(hipMemcpy(d_alpha+BJIndex, &alphaj, sizeof(float),hipMemcpyHostToDevice));
float deltaalphai = alphai - oldalphai;
float deltaalphaj = alphaj - oldalphaj;
CachePosJ=find(KernelCacheIndices.begin(),KernelCacheIndices.end(),(BJIndex>=m?BJIndex-m:BJIndex));
if (CachePosJ ==KernelCacheIndices.end())
{
CacheDiffJ=max_element(KernelCacheItersSinceUsed.begin(),KernelCacheItersSinceUsed.end())-KernelCacheItersSinceUsed.begin();
d_KernelJ=d_Kernel_Cache+CacheDiffJ*m;
mxCUDA_SAFE_CALL(hipMemcpy(d_KernelInterRow, xT+(BJIndex>=m?BJIndex-m:BJIndex)*n, n*sizeof(float),hipMemcpyHostToDevice));
RBFKernel(d_KernelJ,(BJIndex>=m?BJIndex-m:BJIndex),d_x,d_KernelInterRow,d_KernelDotProd,d_SelfDotProd,m,n,nbrCtas,threadsPerCta);
*(KernelCacheIndices.begin()+CacheDiffJ)=(BJIndex>=m?BJIndex-m:BJIndex);
}
else
{
CacheDiffJ=CachePosJ-KernelCacheIndices.begin();
d_KernelJ=d_Kernel_Cache+m*CacheDiffJ;
}
hipLaunchKernelGGL(( UpdateF), dim3(nbrCtas),dim3(threadsPerCta), 0, 0, d_F,d_KernelI,d_KernelJ,d_y,deltaalphai,deltaalphaj,yi,yj,m);
hipLaunchKernelGGL(( UpdateF), dim3(nbrCtas),dim3(threadsPerCta), 0, 0, d_F+m,d_KernelI,d_KernelJ,d_y+m,deltaalphai,deltaalphaj,yi,yj,m);
IncrementKernelCache(KernelCacheItersSinceUsed,RowsInKernelCache);
*(KernelCacheItersSinceUsed.begin()+CacheDiffI)=0;
*(KernelCacheItersSinceUsed.begin()+CacheDiffJ)=0;
iter++;
}
hipblasGetVector(m*2,sizeof(float),d_alpha,1,alphasvr,1);
for(int k=0;k<m;k++)
{
mexalpha[k]=(alphasvr[k]-alphasvr[k+m])*ybinary[k];
}
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(elapsed, start, stop);
mexPutVariable("base","cuSVMTrainTimeInMS",mexelapsed);
delete [] ybinary;
delete [] alphasvr;
delete [] xT;
hipHostFree(value_inter_svr);
hipHostFree(index_inter_svr);
hipHostFree(value_inter);
hipHostFree(index_inter);
mxCUDA_SAFE_CALL(hipFree(d_x));
mxCUDA_SAFE_CALL(hipFree(d_y));
mxCUDA_SAFE_CALL(hipFree(d_alpha));
mxCUDA_SAFE_CALL(hipFree(d_Kernel_Cache));
mxCUDA_SAFE_CALL(hipFree(d_KernelInterRow));
mxCUDA_SAFE_CALL(hipFree(d_F));
mxCUDA_SAFE_CALL(hipFree(d_value_inter));
mxCUDA_SAFE_CALL(hipFree(d_index_inter));
mxCUDA_SAFE_CALL(hipFree(d_SelfDotProd));
mxCUDA_SAFE_CALL(hipFree(d_KernelDotProd));
mxCUDA_SAFE_CALL( hipDeviceReset());
return;
}
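// SVMTrain: SMO solver for binary SVM classification with an RBF kernel. Each
// iteration picks a violating pair (i,j) with FindBI/FindBJ, updates the two alphas
// analytically (UpdateAlphas), refreshes the gradient with UpdateF, and reuses kernel
// columns through the least-recently-used d_Kernel_Cache; it stops when the duality
// gap drops below StoppingCrit and returns the bias in *beta.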
extern "C"
void SVMTrain(float *mexalpha,float* beta,float*y,float *x ,float _C, float _kernelwidth, int m, int n, float StoppingCrit)
{
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
mxArray *mexelapsed =mxCreateNumericMatrix(1, 1,mxSINGLE_CLASS, mxREAL);
float * elapsed=(float *)mxGetData(mexelapsed);
hipEventRecord(start,0);
int numBlocks=64;
dim3 ReduceGrid(numBlocks, 1, 1);
dim3 ReduceBlock(256, 1, 1);
float h_taumin=0.0001;
mxCUDA_SAFE_CALL(hipMemcpyToSymbol(taumin, &h_taumin, sizeof(float)));
_kernelwidth*=-1;
mxCUDA_SAFE_CALL(hipMemcpyToSymbol(kernelwidth, &_kernelwidth, sizeof(float)));
mxCUDA_SAFE_CALL(hipMemcpyToSymbol(C, &_C, sizeof(float)));
float *h_alpha=new float [m];
float *h_F=new float [m];
for(int j=0;j<m;j++)
{
h_alpha[j]=0;
h_F[j]=-1;
}
float *SelfDotProd=new float [m];
DotProdVector(x, SelfDotProd,m, n);
int nbrCtas;
int elemsPerCta;
int threadsPerCta;
VectorSplay (m, SAXPY_THREAD_MIN, SAXPY_THREAD_MAX, SAXPY_CTAS_MAX, &nbrCtas, &elemsPerCta,&threadsPerCta);
float * d_x;
float * d_xT;
float * d_alpha;
float* d_y;
float* d_F;
float *d_KernelDotProd;
float *d_SelfDotProd;
float *d_KernelJ;
float *d_KernelI;
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_x, m*n*sizeof(float)));
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_xT, m*n*sizeof(float)));
mxCUDA_SAFE_CALL(hipMemcpy(d_x, x, sizeof(float)*n*m,hipMemcpyHostToDevice));
dim3 gridtranspose(ceil((float)m / TRANS_BLOCK_DIM), ceil((float)n / TRANS_BLOCK_DIM), 1);
dim3 threadstranspose(TRANS_BLOCK_DIM, TRANS_BLOCK_DIM, 1);
hipDeviceSynchronize();
hipLaunchKernelGGL(( transpose), dim3(gridtranspose), dim3(threadstranspose) , 0, 0, d_xT, d_x, m, n);
float *xT=new float [n*m];
mxCUDA_SAFE_CALL(hipMemcpy(xT, d_xT, sizeof(float)*m*n,hipMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(hipFree(d_xT));
float* d_KernelInterRow;
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_KernelInterRow, n*sizeof(float)));
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_alpha, m*sizeof(float)));
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_y, m*sizeof(float)));
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_F, m*sizeof(float)));
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_SelfDotProd, m*sizeof(float)));
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_KernelDotProd, m*sizeof(float)));
mxCUDA_SAFE_CALL(hipMemcpy(d_y, y, sizeof(float)*m,hipMemcpyHostToDevice));
mxCUDA_SAFE_CALL(hipMemcpy(d_alpha, h_alpha, sizeof(float)*m,hipMemcpyHostToDevice));
mxCUDA_SAFE_CALL(hipMemcpy(d_F, h_F, sizeof(float)*m,hipMemcpyHostToDevice));
mxCUDA_SAFE_CALL(hipMemcpy(d_SelfDotProd, SelfDotProd, sizeof(float)*m,hipMemcpyHostToDevice));
delete [] SelfDotProd;
float* value_inter;
int* index_inter;
hipHostMalloc( (void**)&value_inter, numBlocks*sizeof(float) );
hipHostMalloc( (void**)&index_inter, numBlocks*sizeof(int) );
float* d_value_inter;
int* d_index_inter;
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_value_inter, numBlocks*sizeof(float)));
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_index_inter, numBlocks*sizeof(int)));
size_t free_mem, total;
hipMemGetInfo(&free_mem, &total);
int KernelCacheSize=free_mem-MBtoLeave*1024*1024;
int RowsInKernelCache=KernelCacheSize/(sizeof(float)*m);
/* Do not use all memory available if not needed. */
if (RowsInKernelCache > m) {
RowsInKernelCache = m;
KernelCacheSize = m * sizeof(float) * m;
}
float *d_Kernel_Cache;
mxCUDA_SAFE_CALL(hipMalloc( (void**) &d_Kernel_Cache, KernelCacheSize));
std::vector<int> KernelCacheIndices(RowsInKernelCache,-1);
std::vector<int> KernelCacheItersSinceUsed(RowsInKernelCache,0);
std::vector<int>::iterator CachePosI;
std::vector<int>::iterator CachePosJ;
int CacheDiffI;
int CacheDiffJ;
int CheckStoppingCritEvery=255;
int iter=0;
float BIValue;
int BIIndex;
float SJValue;
float BJSecondOrderValue;
int BJIndex;
float Kij;
float yj;
float yi;
float alphai;
float alphaj;
float oldalphai;
float oldalphaj;
float Fi;
float Fj;
while (1)
{
hipLaunchKernelGGL(( FindBI<256>), dim3(ReduceGrid), dim3(ReduceBlock), 0, 0, d_F, d_y,d_alpha,d_value_inter,d_index_inter, m);
mxCUDA_SAFE_CALL(hipMemcpy(value_inter, d_value_inter, sizeof(float)*numBlocks,hipMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(hipMemcpy(index_inter, d_index_inter, sizeof(int)*numBlocks,hipMemcpyDeviceToHost));
hipDeviceSynchronize();
CpuMaxInd(BIValue,BIIndex,value_inter,index_inter,numBlocks);
hipMemcpy(&Fi, d_F+BIIndex, sizeof(float),hipMemcpyDeviceToHost);
if ((iter & CheckStoppingCritEvery)==0)
{
hipLaunchKernelGGL(( FindStoppingJ<256>), dim3(ReduceGrid), dim3(ReduceBlock), 0, 0, d_F, d_y,d_alpha,d_value_inter, m);
mxCUDA_SAFE_CALL(hipMemcpy(value_inter, d_value_inter, sizeof(float)*numBlocks,hipMemcpyDeviceToHost));
hipDeviceSynchronize();
CpuMin(SJValue,value_inter,numBlocks);
if(BIValue-SJValue<StoppingCrit) {*beta=(SJValue+BIValue)/2; break;}
}
CachePosI=find(KernelCacheIndices.begin(),KernelCacheIndices.end(),BIIndex);
if (CachePosI ==KernelCacheIndices.end())
{
CacheDiffI=max_element(KernelCacheItersSinceUsed.begin(),KernelCacheItersSinceUsed.end())-KernelCacheItersSinceUsed.begin();
d_KernelI=d_Kernel_Cache+CacheDiffI*m;
mxCUDA_SAFE_CALL(hipMemcpy(d_KernelInterRow, xT+BIIndex*n, n*sizeof(float),hipMemcpyHostToDevice));
RBFKernel(d_KernelI,BIIndex,d_x,d_KernelInterRow,d_KernelDotProd,d_SelfDotProd,m,n,nbrCtas,threadsPerCta);
*(KernelCacheIndices.begin()+CacheDiffI)=BIIndex;
}
else
{
CacheDiffI=CachePosI-KernelCacheIndices.begin();
d_KernelI=d_Kernel_Cache+m*CacheDiffI;
}
*(KernelCacheItersSinceUsed.begin()+CacheDiffI)=-1;
hipLaunchKernelGGL(( FindBJ<256>), dim3(ReduceGrid), dim3(ReduceBlock), 0, 0, d_F, d_y,d_alpha,d_KernelI,d_value_inter,d_index_inter,BIValue, m);
mxCUDA_SAFE_CALL(hipMemcpy(value_inter, d_value_inter, sizeof(float)*numBlocks,hipMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(hipMemcpy(index_inter, d_index_inter, sizeof(int)*numBlocks,hipMemcpyDeviceToHost));
hipDeviceSynchronize();
CpuMaxInd(BJSecondOrderValue,BJIndex,value_inter,index_inter,numBlocks);
mxCUDA_SAFE_CALL(hipMemcpy(&Kij, d_KernelI+BJIndex, sizeof(float),hipMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(hipMemcpy(&alphai, d_alpha+BIIndex, sizeof(float),hipMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(hipMemcpy(&alphaj, d_alpha+BJIndex, sizeof(float),hipMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(hipMemcpy(&yi, d_y+BIIndex, sizeof(float),hipMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(hipMemcpy(&yj, d_y+BJIndex, sizeof(float),hipMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(hipMemcpy(&Fj, d_F+BJIndex, sizeof(float),hipMemcpyDeviceToHost));
oldalphai=alphai;
oldalphaj=alphaj;
UpdateAlphas(alphai,alphaj,Kij,yi,yj,Fi,Fj,_C,h_taumin);
mxCUDA_SAFE_CALL(hipMemcpy(d_alpha+BIIndex, &alphai, sizeof(float),hipMemcpyHostToDevice));
mxCUDA_SAFE_CALL(hipMemcpy(d_alpha+BJIndex, &alphaj, sizeof(float),hipMemcpyHostToDevice));
float deltaalphai = alphai - oldalphai;
float deltaalphaj = alphaj - oldalphaj;
CachePosJ=find(KernelCacheIndices.begin(),KernelCacheIndices.end(),BJIndex);
if (CachePosJ ==KernelCacheIndices.end())
{
CacheDiffJ=max_element(KernelCacheItersSinceUsed.begin(),KernelCacheItersSinceUsed.end())-KernelCacheItersSinceUsed.begin();
d_KernelJ=d_Kernel_Cache+CacheDiffJ*m;
mxCUDA_SAFE_CALL(hipMemcpy(d_KernelInterRow, xT+BJIndex*n, n*sizeof(float),hipMemcpyHostToDevice));
RBFKernel(d_KernelJ,BJIndex,d_x,d_KernelInterRow,d_KernelDotProd,d_SelfDotProd,m,n,nbrCtas,threadsPerCta);
*(KernelCacheIndices.begin()+CacheDiffJ)=BJIndex;
}
else
{
CacheDiffJ=CachePosJ-KernelCacheIndices.begin();
d_KernelJ=d_Kernel_Cache+m*CacheDiffJ;
}
hipLaunchKernelGGL(( UpdateF), dim3(nbrCtas),dim3(threadsPerCta), 0, 0, d_F,d_KernelI,d_KernelJ,d_y,deltaalphai,deltaalphaj,yi,yj,m);
IncrementKernelCache(KernelCacheItersSinceUsed,RowsInKernelCache);
*(KernelCacheItersSinceUsed.begin()+CacheDiffI)=0;
*(KernelCacheItersSinceUsed.begin()+CacheDiffJ)=0;
iter++;
}
hipblasGetVector(m,sizeof(float),d_alpha,1,mexalpha,1);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(elapsed, start, stop);
mexPutVariable("base","cuSVMTrainTimeInMS",mexelapsed);
delete [] xT;
hipHostFree(value_inter);
hipHostFree(index_inter);
mxCUDA_SAFE_CALL(hipFree(d_x));
mxCUDA_SAFE_CALL(hipFree(d_y));
mxCUDA_SAFE_CALL(hipFree(d_alpha));
mxCUDA_SAFE_CALL(hipFree(d_KernelInterRow));
mxCUDA_SAFE_CALL(hipFree(d_Kernel_Cache));
mxCUDA_SAFE_CALL(hipFree(d_F));
mxCUDA_SAFE_CALL(hipFree(d_value_inter));
mxCUDA_SAFE_CALL(hipFree(d_index_inter));
mxCUDA_SAFE_CALL(hipFree(d_SelfDotProd));
mxCUDA_SAFE_CALL(hipFree(d_KernelDotProd));
mxCUDA_SAFE_CALL(hipDeviceReset());
return;
}
|
b685fe78dd0b07a3e63c9615bd172a9248b7bbde.cu
|
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <limits.h>
#include <ctype.h>
#include <float.h>
#include <algorithm>
#include <math.h>
#include "cublas.h"
#include "mex.h"
#include "cuda.h"
#include "cuSVMutil.h"
#include <vector>
__constant__ float C;
__constant__ float taumin;
__constant__ float kernelwidth;
template <unsigned int blockSize>
__global__ void FindBJ(float *d_F, float* d_y,float* d_alpha,float* d_KernelCol,float *g_odata,int* g_index,float BIValue, unsigned int n)
{
__shared__ float sdata[blockSize];
__shared__ int ind[blockSize];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + tid;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid]=-FLT_MAX;
ind[tid]=0;
float temp;
float globaltemp;
float LocalCloseY;
float LocalFarY;
float maxtemp;
float denomclose;
float denomfar=1.f;
while (i < n)
{
LocalCloseY=d_y[i];
LocalFarY=(i+blockSize)<n ? d_y[i+blockSize]:0.f;
denomclose=(2.f-2.f*d_KernelCol[i]);
if(i+blockSize<n){denomfar=(2.f-2.f*d_KernelCol[i+blockSize]);}
denomclose=denomclose<taumin?taumin:denomclose;
denomfar=denomfar<taumin?taumin:denomfar;
maxtemp=
fmaxf(
globaltemp=
(LocalCloseY*d_alpha[i])>(LocalCloseY==1?0:-C) ?
__fdividef(__powf(BIValue+LocalCloseY*d_F[i],2.f),denomclose)
:-FLT_MAX,
i+blockSize<n ?
((LocalFarY*d_alpha[i+blockSize])>(LocalFarY==1?0:-C)?
__fdividef(__powf(BIValue+LocalFarY*d_F[i+blockSize],2.f),denomfar)
:-FLT_MAX)
:-FLT_MAX);
sdata[tid]=fmaxf(temp=sdata[tid],maxtemp);
if (sdata[tid]!=temp)
{
sdata[tid]== globaltemp ? ind[tid]=i : ind[tid]=i+blockSize;
}
i += gridSize;
}
__syncthreads();
if (tid < 128){ if (sdata[tid] < sdata[tid + 128]){ ind[tid]=ind[tid+128];sdata[tid]=sdata[tid+128]; }} __syncthreads();
if (tid < 64){ if (sdata[tid] < sdata[tid + 64]){ ind[tid]=ind[tid+64];sdata[tid]=sdata[tid+64]; }} __syncthreads();
if (tid < 32)
{
if (sdata[tid] <sdata[tid + 32]) {ind[tid]=ind[tid+32];sdata[tid]=sdata[tid+32];} __syncthreads();
if (sdata[tid] <sdata[tid + 16]) {ind[tid]=ind[tid+16];sdata[tid]=sdata[tid+16];} __syncthreads();
if (sdata[tid] <sdata[tid + 8]) {ind[tid]=ind[tid+8];sdata[tid]=sdata[tid+8];} __syncthreads();
if (sdata[tid] <sdata[tid + 4]) {ind[tid]=ind[tid+4];sdata[tid]=sdata[tid+4];} __syncthreads();
if (sdata[tid] <sdata[tid + 2]) {ind[tid]=ind[tid+2];sdata[tid]=sdata[tid+2];} __syncthreads();
if (sdata[tid] <sdata[tid + 1]) {ind[tid]=ind[tid+1];sdata[tid]=sdata[tid+1];} __syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
if (tid == 0) g_index[blockIdx.x] = ind[0];
}
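//FindBI: block-level max-reduction that picks the first working-set index i by
//maximizing -y_i*F_i over samples whose alpha can still increase along y_i
//(alpha_i < C if y_i = +1, alpha_i > 0 if y_i = -1). Per-block maxima and
//argmax indices are written to g_odata/g_index and combined on the host.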
template <unsigned int blockSize>
__global__ void FindBI(float *d_F, float* d_y,float* d_alpha,float *g_odata,int* g_index,unsigned int n)
{
__shared__ float sdata[blockSize];
__shared__ int ind[blockSize];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + tid;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid]=-FLT_MAX;
ind[tid]=0;
float temp;
float globaltemp;
float LocalCloseY;
float LocalFarY;
float maxtemp;
while (i < n)
{
LocalCloseY=d_y[i];
LocalFarY=(i+blockSize)<n ? d_y[i+blockSize]:0;
maxtemp=
fmaxf(
globaltemp=
(LocalCloseY*d_alpha[i])<(LocalCloseY==1?C:0) ?
-(d_F[i]*LocalCloseY)
:-FLT_MAX,
i+blockSize<n ?
((LocalFarY*d_alpha[i+blockSize])<(LocalFarY==1?C:0) ?
-(d_F[i+blockSize]*LocalFarY)
:-FLT_MAX)
:-FLT_MAX);
sdata[tid]=fmaxf(temp=sdata[tid],maxtemp);
if (sdata[tid]!=temp)
{
sdata[tid]== globaltemp ? ind[tid]=i : ind[tid]=i+blockSize;
}
i += gridSize;
}
__syncthreads();
if (tid < 128){ if (sdata[tid] < sdata[tid + 128]){ ind[tid]=ind[tid+128];sdata[tid]=sdata[tid+128]; }} __syncthreads();
if (tid < 64){ if (sdata[tid] < sdata[tid + 64]){ ind[tid]=ind[tid+64];sdata[tid]=sdata[tid+64]; }} __syncthreads();
if (tid < 32)
{
if (sdata[tid] <sdata[tid + 32]) {ind[tid]=ind[tid+32];sdata[tid]=sdata[tid+32];} __syncthreads();
if (sdata[tid] <sdata[tid + 16]) {ind[tid]=ind[tid+16];sdata[tid]=sdata[tid+16];} __syncthreads();
if (sdata[tid] <sdata[tid + 8]) {ind[tid]=ind[tid+8];sdata[tid]=sdata[tid+8];} __syncthreads();
if (sdata[tid] <sdata[tid + 4]) {ind[tid]=ind[tid+4];sdata[tid]=sdata[tid+4];} __syncthreads();
if (sdata[tid] <sdata[tid + 2]) {ind[tid]=ind[tid+2];sdata[tid]=sdata[tid+2];} __syncthreads();
if (sdata[tid] <sdata[tid + 1]) {ind[tid]=ind[tid+1];sdata[tid]=sdata[tid+1];} __syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
if (tid == 0) g_index[blockIdx.x] = ind[0];
}
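//FindStoppingJ: block-level min-reduction of -y_i*F_i over samples whose alpha
//can still decrease along y_i (alpha_i > 0 if y_i = +1, alpha_i < C if y_i = -1).
//The host reduces the per-block minima with CpuMin; BIValue - SJValue is the
//KKT violation gap compared against StoppingCrit.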
template <unsigned int blockSize>
__global__ void FindStoppingJ(float *d_F, float* d_y,float* d_alpha,float *g_odata,unsigned int n)
{
__shared__ float sdata[blockSize];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + tid;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid]=FLT_MAX;
float LocalCloseY;
float LocalFarY;
while (i < n)
{
LocalCloseY=d_y[i];
LocalFarY=(i+blockSize)<n ? d_y[i+blockSize]:0;
sdata[tid]=
fminf(
sdata[tid],
fminf(
(LocalCloseY*d_alpha[i])>(LocalCloseY==1?0:-C) ?
-(d_F[i]*LocalCloseY)
:FLT_MAX,
i+blockSize<n ?
((LocalFarY*d_alpha[i+blockSize])>(LocalFarY==1?0:-C)?
-(d_F[i+blockSize]*LocalFarY)
:FLT_MAX)
:FLT_MAX));
i += gridSize;
}
__syncthreads();
if (tid < 128){ sdata[tid]=fminf(sdata[tid],sdata[tid+128]);} __syncthreads();
if (tid < 64){ sdata[tid]=fminf(sdata[tid],sdata[tid+64]);} __syncthreads();
if (tid < 32) {
sdata[tid]=fminf(sdata[tid],sdata[tid+32]); __syncthreads();
sdata[tid]=fminf(sdata[tid],sdata[tid+16]); __syncthreads();
sdata[tid]=fminf(sdata[tid],sdata[tid+8]); __syncthreads();
sdata[tid]=fminf(sdata[tid],sdata[tid+4]); __syncthreads();
sdata[tid]=fminf(sdata[tid],sdata[tid+2]); __syncthreads();
sdata[tid]=fminf(sdata[tid],sdata[tid+1]); __syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
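//UpdateF: after the pair (i,j) has been updated, every optimality indicator is
//refreshed with F_k += y_i*y_k*deltaalphai*K_ik + y_j*y_k*deltaalphaj*K_jk,
//using a grid-stride loop over the n samples.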
__global__ void UpdateF(float * F,float *KernelColI,float* KernelColJ, float* d_y,float deltaalphai,float deltaalphaj,float yi,float yj,int n)
{
int totalThreads,ctaStart,tid;
totalThreads = gridDim.x*blockDim.x;
ctaStart = blockDim.x*blockIdx.x;
tid = threadIdx.x;
int i;
for (i = ctaStart + tid; i < n; i += totalThreads)
{
F[i] = F[i] + yi*d_y[i]*deltaalphai*KernelColI[i]+yj*d_y[i]*deltaalphaj*KernelColJ[i];
}
}
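//RBFFinish: completes one kernel column from precomputed dot products,
//K(x_b, x_k) = exp(kernelwidth*(||x_k||^2 + ||x_b||^2 - 2*x_k.x_b)).
//kernelwidth is negated on the host before being copied to constant memory,
//so this evaluates exp(-gamma*||x_k - x_b||^2).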
__global__ void RBFFinish(float *KernelCol, const float * KernelDotProd,const float* DotProd,const float* DotProdRow,const int n)
{
int totalThreads,ctaStart,tid;
totalThreads = gridDim.x*blockDim.x;
ctaStart = blockDim.x*blockIdx.x;
tid = threadIdx.x;
int i;
float temp;
for (i = ctaStart + tid; i < n; i += totalThreads)
{
KernelCol[i] = expf(kernelwidth*(DotProd[i]+*DotProdRow-KernelDotProd[i]*2.f));
}
}
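//RBFKernel: host wrapper that builds one full kernel column. cublasSgemv forms
//the dot product of every training vector with the selected row (already staged
//in d_Kernel_InterRow), and RBFFinish exponentiates the results.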
void RBFKernel(float *d_KernelJ,const int BJIndex,const float *d_x,const float * d_Kernel_InterRow,float *d_KernelDotProd, float *d_SelfDotProd,const int& m,const int& n,const int &nbrCtas,const int& threadsPerCta)
{
cublasSgemv ('n', m, n, 1,d_x, m, d_Kernel_InterRow, 1, 0, d_KernelDotProd, 1);
RBFFinish<<<nbrCtas,threadsPerCta>>>(d_KernelJ, d_KernelDotProd,d_SelfDotProd,d_SelfDotProd+BJIndex,m);
}
void CpuMaxInd(float &BIValue, int &BIIndex,const float * value_inter,const int * index_inter,const int n)
{
BIValue=value_inter[0];
BIIndex=index_inter[0];
for(int j=0;j<n;j++)
{
if (value_inter[j]>BIValue)
{
BIValue=value_inter[j];
BIIndex=index_inter[j];
}
}
}
void CpuMaxIndSvr(float &BIValue, int &BIIndex, const float * value_inter,const int * index_inter,int n,const int m)
{
BIValue=value_inter[0];
BIIndex=index_inter[0];
for(int j=0;j<n;j++)
{
if (value_inter[j]>BIValue)
{
BIValue=value_inter[j];
BIIndex=j<n/2?index_inter[j]:index_inter[j]+m;
}
}
}
void CpuMin(float &SJValue, float * value_inter,int n)
{
SJValue=value_inter[0];
for(int j=0;j<n;j++)
{
if (value_inter[j]<SJValue)
{
SJValue=value_inter[j];
}
}
}
void DotProdVector(float * x, float* dotprod,int m, int n)
{
for(int i=0;i<m;i++)
{
dotprod[i]=0;
for(int j=0;j<n;j++)
dotprod[i]+=(x[i+j*m])*(x[i+j*m]);
}
}
void IncrementKernelCache(std::vector<int>& KernelCacheItersSinceUsed,const int &RowsInKernelCache)
{
for(int k=0;k<RowsInKernelCache;k++)
{
KernelCacheItersSinceUsed[k]+=1;
}
}
inline void UpdateAlphas(float& alphai,float& alphaj,const float& Kij,const float& yi,const float& yj,const float& Fi,const float& Fj,const float& C,const float& h_taumin)
{
//This alpha update code is adapted from that in LIBSVM.
//Chih-Chung Chang and Chih-Jen Lin, LIBSVM : a library for support vector machines, 2001. Software available at http://www.csie.ntu.edu.tw/~cjlin/libsvm
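//In outline, with denom = max(2 - 2*Kij, taumin):
//  yi != yj: lambda = (-Fi - Fj)/denom; alphai += lambda; alphaj += lambda;
//  yi == yj: lambda = ( Fi - Fj)/denom; alphai -= lambda; alphaj += lambda;
//then both multipliers are clipped back into [0, C] while preserving
//alphai - alphaj (different labels) or alphai + alphaj (same labels).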
float lambda;
float lambda_denom;
lambda_denom=2.0-2.0*Kij;
if (lambda_denom<h_taumin) {lambda_denom=h_taumin;}
if (yi!=yj)
{
lambda=(-Fi-Fj)/lambda_denom;
float alphadiff=alphai-alphaj;
alphai+=lambda;
alphaj+=lambda;
if(alphadiff > 0)
{
if(alphaj < 0)
{
alphaj = 0;
alphai = alphadiff;
}
}
else
{
if(alphai < 0)
{
alphai = 0;
alphaj = -alphadiff;
}
}
if(alphadiff > 0)
{
if(alphai > C)
{
alphai = C;
alphaj = C - alphadiff;
}
}
else
{
if(alphaj > C)
{
alphaj = C;
alphai = C + alphadiff;
}
}
}
else
{
float alphasum=alphai+alphaj;
lambda=(Fi-Fj)/lambda_denom;
alphai-=lambda;
alphaj+=lambda;
if(alphasum > C)
{
if(alphai > C)
{
alphai = C;
alphaj = alphasum - C;
}
if(alphaj > C)
{
alphaj = C;
alphai = alphasum - C;
}
}
else
{
if(alphaj < 0)
{
alphaj = 0;
alphai = alphasum;
}
if(alphai < 0)
{
alphai = 0;
alphaj = alphasum;
}
}
}
}
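//SVRTrain: epsilon-SVR is mapped onto the binary SMO machinery above by doubling
//the data set. Each of the m samples appears once with label +1 and initial
//F_j = -y_j + eps and once with label -1 and F_{j+m} = y_j + eps, giving a
//2m-variable problem over (alpha, alpha*); the returned coefficient for sample k
//is alpha_k - alpha_{k+m}.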
extern "C"
void SVRTrain(float *mexalpha,float* beta,float*y,float *x ,float _C, float _kernelwidth, float eps, int m, int n, float StoppingCrit)
{
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
mxArray *mexelapsed =mxCreateNumericMatrix(1, 1,mxSINGLE_CLASS, mxREAL);
float * elapsed=(float *)mxGetData(mexelapsed);
cudaEventRecord(start,0);
cublasInit();
int numBlocks=64;
dim3 ReduceGrid(numBlocks, 1, 1);
dim3 ReduceBlock(256, 1, 1);
float h_taumin=0.0001;
mxCUDA_SAFE_CALL(cudaMemcpyToSymbol(taumin, &h_taumin, sizeof(float)));
_kernelwidth*=-1;
mxCUDA_SAFE_CALL(cudaMemcpyToSymbol(kernelwidth, &_kernelwidth, sizeof(float)));
mxCUDA_SAFE_CALL(cudaMemcpyToSymbol(C, &_C, sizeof(float)));
float *alphasvr=new float [2*m];
float *ybinary=new float [2*m];
float *F=new float [2*m];
for(int j=0;j<m;j++)
{
alphasvr[j]=0;
ybinary[j]=1;
F[j]=-y[j]+eps;
alphasvr[j+m]=0;
ybinary[j+m]=-1;
F[j+m]=y[j]+eps;
}
float *SelfDotProd=new float [m];
DotProdVector(x, SelfDotProd,m, n);
int nbrCtas;
int elemsPerCta;
int threadsPerCta;
VectorSplay (m, SAXPY_THREAD_MIN, SAXPY_THREAD_MAX, SAXPY_CTAS_MAX, &nbrCtas, &elemsPerCta,&threadsPerCta);
float * d_x;
float * d_xT;
float * d_alpha;
float* d_y;
float* d_F;
float *d_KernelDotProd;
float *d_SelfDotProd;
float *d_KernelJ;
float *d_KernelI;
float* d_KernelInterRow;
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_x, m*n*sizeof(float)));
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_xT, m*n*sizeof(float)));
mxCUDA_SAFE_CALL(cudaMemcpy(d_x, x, sizeof(float)*n*m,cudaMemcpyHostToDevice));
dim3 gridtranspose(ceil((float)m / TRANS_BLOCK_DIM), ceil((float)n / TRANS_BLOCK_DIM), 1);
dim3 threadstranspose(TRANS_BLOCK_DIM, TRANS_BLOCK_DIM, 1);
cudaThreadSynchronize();
transpose<<< gridtranspose, threadstranspose >>>(d_xT, d_x, m, n);
float *xT=new float [n*m];
mxCUDA_SAFE_CALL(cudaMemcpy(xT, d_xT, sizeof(float)*m*n,cudaMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(cudaFree(d_xT));
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_KernelInterRow, n*sizeof(float)));
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_alpha, 2*m*sizeof(float)));
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_y, 2*m*sizeof(float)));
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_F, 2*m*sizeof(float)));
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_SelfDotProd, m*sizeof(float)));
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_KernelDotProd, m*sizeof(float)));
mxCUDA_SAFE_CALL(cudaMemcpy(d_y, ybinary, sizeof(float)*m*2,cudaMemcpyHostToDevice));
mxCUDA_SAFE_CALL(cudaMemcpy(d_alpha, alphasvr, sizeof(float)*m*2,cudaMemcpyHostToDevice));
mxCUDA_SAFE_CALL(cudaMemcpy(d_F, F, sizeof(float)*m*2,cudaMemcpyHostToDevice));
mxCUDA_SAFE_CALL(cudaMemcpy(d_SelfDotProd, SelfDotProd, sizeof(float)*m,cudaMemcpyHostToDevice));
delete [] F;
delete [] SelfDotProd;
float* value_inter;
int* index_inter;
float* value_inter_svr;
int* index_inter_svr;
cudaMallocHost( (void**)&value_inter, numBlocks*sizeof(float) );
cudaMallocHost( (void**)&index_inter, numBlocks*sizeof(int) );
cudaMallocHost( (void**)&value_inter_svr, 2*numBlocks*sizeof(float) );
cudaMallocHost( (void**)&index_inter_svr, 2*numBlocks*sizeof(int) );
float* d_value_inter;
int* d_index_inter;
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_value_inter, numBlocks*sizeof(float)));
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_index_inter, numBlocks*sizeof(int)));
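//Kernel column cache: all GPU memory left after reserving MBtoLeave megabytes
//holds previously computed kernel columns. KernelCacheIndices maps a cache slot
//to the training index it stores, and KernelCacheItersSinceUsed implements LRU
//eviction: on a miss, max_element picks the stalest slot; slots used in the
//current iteration are reset to 0 (or temporarily pinned with -1).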
size_t free_mem, total;
cuMemGetInfo(&free_mem, &total);
int KernelCacheSize=free_mem-MBtoLeave*1024*1024;
int RowsInKernelCache=KernelCacheSize/(sizeof(float)*m);
float *d_Kernel_Cache;
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_Kernel_Cache, KernelCacheSize));
std::vector<int> KernelCacheIndices(RowsInKernelCache,-1);
std::vector<int> KernelCacheItersSinceUsed(RowsInKernelCache,0);
std::vector<int>::iterator CachePosI;
std::vector<int>::iterator CachePosJ;
int CacheDiffI;
int CacheDiffJ;
int CheckStoppingCritEvery=255;
int iter=0;
float BIValue;
int BIIndex;
float SJValue;
float BJSecondOrderValue;
int BJIndex;
float Kij;
float yj;
float yi;
float alphai;
float alphaj;
float oldalphai;
float oldalphaj;
float Fi;
float Fj;
while (1)
{
FindBI<256><<<ReduceGrid, ReduceBlock>>>(d_F, d_y,d_alpha,d_value_inter,d_index_inter, 2*m);
mxCUDA_SAFE_CALL(cudaMemcpy(value_inter, d_value_inter, sizeof(float)*numBlocks,cudaMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(cudaMemcpy(index_inter, d_index_inter, sizeof(int)*numBlocks,cudaMemcpyDeviceToHost));
cudaThreadSynchronize();
CpuMaxInd(BIValue,BIIndex,value_inter,index_inter,numBlocks);
if ((iter & CheckStoppingCritEvery)==0)
{
FindStoppingJ<256><<<ReduceGrid, ReduceBlock>>>(d_F, d_y,d_alpha,d_value_inter, 2*m);
mxCUDA_SAFE_CALL(cudaMemcpy(value_inter, d_value_inter, sizeof(float)*numBlocks,cudaMemcpyDeviceToHost));
cudaThreadSynchronize();
CpuMin(SJValue,value_inter,numBlocks);
if(BIValue-SJValue<StoppingCrit) {*beta=(SJValue+BIValue)/2; break;}
}
CachePosI=find(KernelCacheIndices.begin(),KernelCacheIndices.end(),(BIIndex>=m?BIIndex-m:BIIndex));
if (CachePosI ==KernelCacheIndices.end())
{
CacheDiffI=max_element(KernelCacheItersSinceUsed.begin(),KernelCacheItersSinceUsed.end())-KernelCacheItersSinceUsed.begin();
d_KernelI=d_Kernel_Cache+CacheDiffI*m;
mxCUDA_SAFE_CALL(cudaMemcpy(d_KernelInterRow, xT+(BIIndex>=m?BIIndex-m:BIIndex)*n, n*sizeof(float),cudaMemcpyHostToDevice));
RBFKernel(d_KernelI,(BIIndex>=m?BIIndex-m:BIIndex),d_x,d_KernelInterRow,d_KernelDotProd,d_SelfDotProd,m,n,nbrCtas,threadsPerCta);
*(KernelCacheIndices.begin()+CacheDiffI)=(BIIndex>=m?BIIndex-m:BIIndex);
}
else
{
CacheDiffI=CachePosI-KernelCacheIndices.begin();
d_KernelI=d_Kernel_Cache+m*CacheDiffI;
}
*(KernelCacheItersSinceUsed.begin()+CacheDiffI)=-1;
FindBJ<256><<<ReduceGrid, ReduceBlock>>>(d_F, d_y,d_alpha,d_KernelI,d_value_inter,d_index_inter,BIValue, m);
mxCUDA_SAFE_CALL(cudaMemcpy(value_inter_svr, d_value_inter, sizeof(float)*numBlocks,cudaMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(cudaMemcpy(index_inter_svr, d_index_inter, sizeof(int)*numBlocks,cudaMemcpyDeviceToHost));
FindBJ<256><<<ReduceGrid, ReduceBlock>>>(d_F+m, d_y+m,d_alpha+m,d_KernelI,d_value_inter,d_index_inter,BIValue,m);
mxCUDA_SAFE_CALL(cudaMemcpy(value_inter_svr+numBlocks, d_value_inter, sizeof(float)*numBlocks,cudaMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(cudaMemcpy(index_inter_svr+numBlocks, d_index_inter, sizeof(int)*numBlocks,cudaMemcpyDeviceToHost));
cudaThreadSynchronize();
CpuMaxIndSvr(BJSecondOrderValue,BJIndex,value_inter_svr,index_inter_svr,2*numBlocks,m);
mxCUDA_SAFE_CALL(cudaMemcpy(&Kij, d_KernelI+(BJIndex>=m?BJIndex-m:BJIndex), sizeof(float),cudaMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(cudaMemcpy(&alphai, d_alpha+BIIndex, sizeof(float),cudaMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(cudaMemcpy(&alphaj, d_alpha+BJIndex, sizeof(float),cudaMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(cudaMemcpy(&yi, d_y+BIIndex, sizeof(float),cudaMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(cudaMemcpy(&yj, d_y+BJIndex, sizeof(float),cudaMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(cudaMemcpy(&Fi, d_F+BIIndex, sizeof(float),cudaMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(cudaMemcpy(&Fj, d_F+BJIndex, sizeof(float),cudaMemcpyDeviceToHost));
oldalphai=alphai;
oldalphaj=alphaj;
UpdateAlphas(alphai,alphaj,Kij,yi,yj,Fi,Fj,_C,h_taumin);
mxCUDA_SAFE_CALL(cudaMemcpy(d_alpha+BIIndex, &alphai, sizeof(float),cudaMemcpyHostToDevice));
mxCUDA_SAFE_CALL(cudaMemcpy(d_alpha+BJIndex, &alphaj, sizeof(float),cudaMemcpyHostToDevice));
float deltaalphai = alphai - oldalphai;
float deltaalphaj = alphaj - oldalphaj;
CachePosJ=find(KernelCacheIndices.begin(),KernelCacheIndices.end(),(BJIndex>=m?BJIndex-m:BJIndex));
if (CachePosJ ==KernelCacheIndices.end())
{
CacheDiffJ=max_element(KernelCacheItersSinceUsed.begin(),KernelCacheItersSinceUsed.end())-KernelCacheItersSinceUsed.begin();
d_KernelJ=d_Kernel_Cache+CacheDiffJ*m;
mxCUDA_SAFE_CALL(cudaMemcpy(d_KernelInterRow, xT+(BJIndex>=m?BJIndex-m:BJIndex)*n, n*sizeof(float),cudaMemcpyHostToDevice));
RBFKernel(d_KernelJ,(BJIndex>=m?BJIndex-m:BJIndex),d_x,d_KernelInterRow,d_KernelDotProd,d_SelfDotProd,m,n,nbrCtas,threadsPerCta);
*(KernelCacheIndices.begin()+CacheDiffJ)=(BJIndex>=m?BJIndex-m:BJIndex);
}
else
{
CacheDiffJ=CachePosJ-KernelCacheIndices.begin();
d_KernelJ=d_Kernel_Cache+m*CacheDiffJ;
}
UpdateF<<<nbrCtas,threadsPerCta>>>(d_F,d_KernelI,d_KernelJ,d_y,deltaalphai,deltaalphaj,yi,yj,m);
UpdateF<<<nbrCtas,threadsPerCta>>>(d_F+m,d_KernelI,d_KernelJ,d_y+m,deltaalphai,deltaalphaj,yi,yj,m);
IncrementKernelCache(KernelCacheItersSinceUsed,RowsInKernelCache);
*(KernelCacheItersSinceUsed.begin()+CacheDiffI)=0;
*(KernelCacheItersSinceUsed.begin()+CacheDiffJ)=0;
iter++;
}
cublasGetVector(m*2,sizeof(float),d_alpha,1,alphasvr,1);
for(int k=0;k<m;k++)
{
mexalpha[k]=(alphasvr[k]-alphasvr[k+m])*ybinary[k];
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(elapsed, start, stop);
mexPutVariable("base","cuSVMTrainTimeInMS",mexelapsed);
delete [] ybinary;
delete [] alphasvr;
delete [] xT;
cudaFreeHost(value_inter_svr);
cudaFreeHost(index_inter_svr);
cudaFreeHost(value_inter);
cudaFreeHost(index_inter);
mxCUDA_SAFE_CALL(cudaFree(d_x));
mxCUDA_SAFE_CALL(cudaFree(d_y));
mxCUDA_SAFE_CALL(cudaFree(d_alpha));
mxCUDA_SAFE_CALL(cudaFree(d_Kernel_Cache));
mxCUDA_SAFE_CALL(cudaFree(d_KernelInterRow));
mxCUDA_SAFE_CALL(cudaFree(d_F));
mxCUDA_SAFE_CALL(cudaFree(d_value_inter));
mxCUDA_SAFE_CALL(cudaFree(d_index_inter));
mxCUDA_SAFE_CALL(cudaFree(d_SelfDotProd));
mxCUDA_SAFE_CALL(cudaFree(d_KernelDotProd));
mxCUDA_SAFE_CALL( cudaThreadExit());
return;
}
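//SVMTrain: binary C-SVC trainer. Each outer iteration selects i with FindBI,
//checks the duality gap with FindStoppingJ whenever (iter & CheckStoppingCritEvery)
//== 0, selects j with FindBJ against the (cached or freshly computed) kernel
//column of i, updates the two alphas on the host with UpdateAlphas, and then
//propagates the change to all F values with UpdateF.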
extern "C"
void SVMTrain(float *mexalpha,float* beta,float*y,float *x ,float _C, float _kernelwidth, int m, int n, float StoppingCrit)
{
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
mxArray *mexelapsed =mxCreateNumericMatrix(1, 1,mxSINGLE_CLASS, mxREAL);
float * elapsed=(float *)mxGetData(mexelapsed);
cudaEventRecord(start,0);
int numBlocks=64;
dim3 ReduceGrid(numBlocks, 1, 1);
dim3 ReduceBlock(256, 1, 1);
float h_taumin=0.0001;
mxCUDA_SAFE_CALL(cudaMemcpyToSymbol(taumin, &h_taumin, sizeof(float)));
_kernelwidth*=-1;
mxCUDA_SAFE_CALL(cudaMemcpyToSymbol(kernelwidth, &_kernelwidth, sizeof(float)));
mxCUDA_SAFE_CALL(cudaMemcpyToSymbol(C, &_C, sizeof(float)));
float *h_alpha=new float [m];
float *h_F=new float [m];
for(int j=0;j<m;j++)
{
h_alpha[j]=0;
h_F[j]=-1;
}
float *SelfDotProd=new float [m];
DotProdVector(x, SelfDotProd,m, n);
int nbrCtas;
int elemsPerCta;
int threadsPerCta;
VectorSplay (m, SAXPY_THREAD_MIN, SAXPY_THREAD_MAX, SAXPY_CTAS_MAX, &nbrCtas, &elemsPerCta,&threadsPerCta);
float * d_x;
float * d_xT;
float * d_alpha;
float* d_y;
float* d_F;
float *d_KernelDotProd;
float *d_SelfDotProd;
float *d_KernelJ;
float *d_KernelI;
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_x, m*n*sizeof(float)));
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_xT, m*n*sizeof(float)));
mxCUDA_SAFE_CALL(cudaMemcpy(d_x, x, sizeof(float)*n*m,cudaMemcpyHostToDevice));
dim3 gridtranspose(ceil((float)m / TRANS_BLOCK_DIM), ceil((float)n / TRANS_BLOCK_DIM), 1);
dim3 threadstranspose(TRANS_BLOCK_DIM, TRANS_BLOCK_DIM, 1);
cudaThreadSynchronize();
transpose<<< gridtranspose, threadstranspose >>>(d_xT, d_x, m, n);
float *xT=new float [n*m];
mxCUDA_SAFE_CALL(cudaMemcpy(xT, d_xT, sizeof(float)*m*n,cudaMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(cudaFree(d_xT));
float* d_KernelInterRow;
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_KernelInterRow, n*sizeof(float)));
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_alpha, m*sizeof(float)));
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_y, m*sizeof(float)));
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_F, m*sizeof(float)));
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_SelfDotProd, m*sizeof(float)));
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_KernelDotProd, m*sizeof(float)));
mxCUDA_SAFE_CALL(cudaMemcpy(d_y, y, sizeof(float)*m,cudaMemcpyHostToDevice));
mxCUDA_SAFE_CALL(cudaMemcpy(d_alpha, h_alpha, sizeof(float)*m,cudaMemcpyHostToDevice));
mxCUDA_SAFE_CALL(cudaMemcpy(d_F, h_F, sizeof(float)*m,cudaMemcpyHostToDevice));
mxCUDA_SAFE_CALL(cudaMemcpy(d_SelfDotProd, SelfDotProd, sizeof(float)*m,cudaMemcpyHostToDevice));
delete [] SelfDotProd;
float* value_inter;
int* index_inter;
cudaMallocHost( (void**)&value_inter, numBlocks*sizeof(float) );
cudaMallocHost( (void**)&index_inter, numBlocks*sizeof(int) );
float* d_value_inter;
int* d_index_inter;
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_value_inter, numBlocks*sizeof(float)));
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_index_inter, numBlocks*sizeof(int)));
size_t free_mem, total;
cuMemGetInfo(&free_mem, &total);
int KernelCacheSize=free_mem-MBtoLeave*1024*1024;
int RowsInKernelCache=KernelCacheSize/(sizeof(float)*m);
/* Do not use all memory available if not needed. */
if (RowsInKernelCache > m) {
RowsInKernelCache = m;
KernelCacheSize = m * sizeof(float) * m;
}
float *d_Kernel_Cache;
mxCUDA_SAFE_CALL(cudaMalloc( (void**) &d_Kernel_Cache, KernelCacheSize));
std::vector<int> KernelCacheIndices(RowsInKernelCache,-1);
std::vector<int> KernelCacheItersSinceUsed(RowsInKernelCache,0);
std::vector<int>::iterator CachePosI;
std::vector<int>::iterator CachePosJ;
int CacheDiffI;
int CacheDiffJ;
int CheckStoppingCritEvery=255;
int iter=0;
float BIValue;
int BIIndex;
float SJValue;
float BJSecondOrderValue;
int BJIndex;
float Kij;
float yj;
float yi;
float alphai;
float alphaj;
float oldalphai;
float oldalphaj;
float Fi;
float Fj;
while (1)
{
FindBI<256><<<ReduceGrid, ReduceBlock>>>(d_F, d_y,d_alpha,d_value_inter,d_index_inter, m);
mxCUDA_SAFE_CALL(cudaMemcpy(value_inter, d_value_inter, sizeof(float)*numBlocks,cudaMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(cudaMemcpy(index_inter, d_index_inter, sizeof(int)*numBlocks,cudaMemcpyDeviceToHost));
cudaThreadSynchronize();
CpuMaxInd(BIValue,BIIndex,value_inter,index_inter,numBlocks);
cudaMemcpy(&Fi, d_F+BIIndex, sizeof(float),cudaMemcpyDeviceToHost);
if ((iter & CheckStoppingCritEvery)==0)
{
FindStoppingJ<256><<<ReduceGrid, ReduceBlock>>>(d_F, d_y,d_alpha,d_value_inter, m);
mxCUDA_SAFE_CALL(cudaMemcpy(value_inter, d_value_inter, sizeof(float)*numBlocks,cudaMemcpyDeviceToHost));
cudaThreadSynchronize();
CpuMin(SJValue,value_inter,numBlocks);
if(BIValue-SJValue<StoppingCrit) {*beta=(SJValue+BIValue)/2; break;}
}
CachePosI=find(KernelCacheIndices.begin(),KernelCacheIndices.end(),BIIndex);
if (CachePosI ==KernelCacheIndices.end())
{
CacheDiffI=max_element(KernelCacheItersSinceUsed.begin(),KernelCacheItersSinceUsed.end())-KernelCacheItersSinceUsed.begin();
d_KernelI=d_Kernel_Cache+CacheDiffI*m;
mxCUDA_SAFE_CALL(cudaMemcpy(d_KernelInterRow, xT+BIIndex*n, n*sizeof(float),cudaMemcpyHostToDevice));
RBFKernel(d_KernelI,BIIndex,d_x,d_KernelInterRow,d_KernelDotProd,d_SelfDotProd,m,n,nbrCtas,threadsPerCta);
*(KernelCacheIndices.begin()+CacheDiffI)=BIIndex;
}
else
{
CacheDiffI=CachePosI-KernelCacheIndices.begin();
d_KernelI=d_Kernel_Cache+m*CacheDiffI;
}
*(KernelCacheItersSinceUsed.begin()+CacheDiffI)=-1;
FindBJ<256><<<ReduceGrid, ReduceBlock>>>(d_F, d_y,d_alpha,d_KernelI,d_value_inter,d_index_inter,BIValue, m);
mxCUDA_SAFE_CALL(cudaMemcpy(value_inter, d_value_inter, sizeof(float)*numBlocks,cudaMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(cudaMemcpy(index_inter, d_index_inter, sizeof(int)*numBlocks,cudaMemcpyDeviceToHost));
cudaThreadSynchronize();
CpuMaxInd(BJSecondOrderValue,BJIndex,value_inter,index_inter,numBlocks);
mxCUDA_SAFE_CALL(cudaMemcpy(&Kij, d_KernelI+BJIndex, sizeof(float),cudaMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(cudaMemcpy(&alphai, d_alpha+BIIndex, sizeof(float),cudaMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(cudaMemcpy(&alphaj, d_alpha+BJIndex, sizeof(float),cudaMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(cudaMemcpy(&yi, d_y+BIIndex, sizeof(float),cudaMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(cudaMemcpy(&yj, d_y+BJIndex, sizeof(float),cudaMemcpyDeviceToHost));
mxCUDA_SAFE_CALL(cudaMemcpy(&Fj, d_F+BJIndex, sizeof(float),cudaMemcpyDeviceToHost));
oldalphai=alphai;
oldalphaj=alphaj;
UpdateAlphas(alphai,alphaj,Kij,yi,yj,Fi,Fj,_C,h_taumin);
mxCUDA_SAFE_CALL(cudaMemcpy(d_alpha+BIIndex, &alphai, sizeof(float),cudaMemcpyHostToDevice));
mxCUDA_SAFE_CALL(cudaMemcpy(d_alpha+BJIndex, &alphaj, sizeof(float),cudaMemcpyHostToDevice));
float deltaalphai = alphai - oldalphai;
float deltaalphaj = alphaj - oldalphaj;
CachePosJ=find(KernelCacheIndices.begin(),KernelCacheIndices.end(),BJIndex);
if (CachePosJ ==KernelCacheIndices.end())
{
CacheDiffJ=max_element(KernelCacheItersSinceUsed.begin(),KernelCacheItersSinceUsed.end())-KernelCacheItersSinceUsed.begin();
d_KernelJ=d_Kernel_Cache+CacheDiffJ*m;
mxCUDA_SAFE_CALL(cudaMemcpy(d_KernelInterRow, xT+BJIndex*n, n*sizeof(float),cudaMemcpyHostToDevice));
RBFKernel(d_KernelJ,BJIndex,d_x,d_KernelInterRow,d_KernelDotProd,d_SelfDotProd,m,n,nbrCtas,threadsPerCta);
*(KernelCacheIndices.begin()+CacheDiffJ)=BJIndex;
}
else
{
CacheDiffJ=CachePosJ-KernelCacheIndices.begin();
d_KernelJ=d_Kernel_Cache+m*CacheDiffJ;
}
UpdateF<<<nbrCtas,threadsPerCta>>>(d_F,d_KernelI,d_KernelJ,d_y,deltaalphai,deltaalphaj,yi,yj,m);
IncrementKernelCache(KernelCacheItersSinceUsed,RowsInKernelCache);
*(KernelCacheItersSinceUsed.begin()+CacheDiffI)=0;
*(KernelCacheItersSinceUsed.begin()+CacheDiffJ)=0;
iter++;
}
cublasGetVector(m,sizeof(float),d_alpha,1,mexalpha,1);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(elapsed, start, stop);
mexPutVariable("base","cuSVMTrainTimeInMS",mexelapsed);
delete [] xT;
cudaFreeHost(value_inter);
cudaFreeHost(index_inter);
mxCUDA_SAFE_CALL(cudaFree(d_x));
mxCUDA_SAFE_CALL(cudaFree(d_y));
mxCUDA_SAFE_CALL(cudaFree(d_alpha));
mxCUDA_SAFE_CALL(cudaFree(d_KernelInterRow));
mxCUDA_SAFE_CALL(cudaFree(d_Kernel_Cache));
mxCUDA_SAFE_CALL(cudaFree(d_F));
mxCUDA_SAFE_CALL(cudaFree(d_value_inter));
mxCUDA_SAFE_CALL(cudaFree(d_index_inter));
mxCUDA_SAFE_CALL(cudaFree(d_SelfDotProd));
mxCUDA_SAFE_CALL(cudaFree(d_KernelDotProd));
mxCUDA_SAFE_CALL(cudaThreadExit());
return;
}
|
d87b85da2ea150ce3302b4b11f17343fa6b02cb1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* upscale.cu
*
* Author: Alan_Huang
*/
#include "caffe/util/upscale.hpp"
#include "hip/device_functions.h"
namespace caffe {
/*
* nthreads should be n*c*4
*/
template <typename IterpolateAction, typename Dtype>
__global__ void kernel_upscale_2x_corner(const int nthreads, Dtype* src,
const int src_h, const int src_w, Dtype* dst) {
const int src_spatial_dim = src_h * src_w;
const int dst_spatial_dim = src_spatial_dim * 4;
const int dst_h = src_h * 2;
const int dst_w = src_w * 2;
int dst_offset[] = {0, dst_w - 1, dst_w * (dst_h -1), dst_w * dst_h - 1};
int src_offset[] = {0, src_w - 1, src_w * (src_h -1), src_w * src_h - 1};
CUDA_KERNEL_LOOP(index, nthreads) {
int c_id = index / 4;
IterpolateAction::DoEltwise(src,
c_id * src_spatial_dim + src_offset[index % 4],
dst,
c_id * dst_spatial_dim + dst_offset[index % 4]);
}
}
/*
* upscale_all_border_horizontal_lines.
* nthreads should be n*c*(dst_w -2) * 2
*/
template <typename IterpolateAction, typename Dtype>
__global__ void kernel_upscale_2x_border_line_horizontal(const int nthreads,
Dtype* src, const int src_h, const int src_w, Dtype* dst) {
const int src_spatial_dim = src_h * src_w;
const int dst_spatial_dim = src_spatial_dim * 4;
const int dst_h = src_h * 2;
const int dst_w = src_w * 2;
int dst_offset[] = {0, dst_w * (dst_h -1)};
int src_offset[] = {0, src_w * (src_h -1)};
__shared__ Dtype zero;
CUDA_KERNEL_LOOP(index, nthreads) {
int c_id = index / ((dst_w -2) * 2);
int line_id = (index / (dst_w -2)) % 2;
int dst_w_id = 1 + (index % (dst_w -2));
Dtype* src_p11 = src + c_id * src_spatial_dim +
src_offset[line_id] + (dst_w_id-1)/2;
Dtype* dst_p = dst + c_id * dst_spatial_dim +
dst_offset[line_id] + dst_w_id;
IterpolateAction::template Do<Dtype, 1>(src_p11,
src_p11 + 1, &zero, &zero, dst_p,
256/4 + 128 * ((dst_w_id-1)%2), 0);
}
}
/*
 * upscale_all_border_vertical_lines.
* nthreads should be n*c*(dst_h -2) * 2
*/
template <typename IterpolateAction, typename Dtype>
__global__ void kernel_upscale_2x_border_line_vertical(const int nthreads,
Dtype* src, const int src_h, const int src_w, Dtype* dst) {
const int src_spatial_dim = src_h * src_w;
const int dst_spatial_dim = src_spatial_dim * 4;
const int dst_h = src_h * 2;
const int dst_w = src_w * 2;
int dst_offset[] = {0, dst_w - 1};
int src_offset[] = {0, src_w - 1};
__shared__ Dtype zero ;
CUDA_KERNEL_LOOP(index, nthreads) {
int c_id = index / ((dst_h -2) * 2);
int id_inside_c = index % ((dst_h -2) * 2);
int dst_h_id = id_inside_c / 2 + 1;
int col_id = id_inside_c % 2 ;
Dtype* src_p11 = src + c_id * src_spatial_dim +
src_offset[col_id] + (dst_h_id-1)/2 * src_w;
Dtype* dst_p = dst + c_id * dst_spatial_dim +
dst_offset[col_id] + dst_h_id * dst_w;
IterpolateAction::template Do<Dtype, 1>(src_p11,
&zero, src_p11 + src_w, &zero, dst_p,
0, 256/4 + 128 * ((dst_h_id-1)%2));
}
}
/*
 * upscale_all_interior_lines.
* nthreads should be n*c*(dst_h -2) * (dst_w -2)
*/
template <typename IterpolateAction, typename Dtype>
__global__ void kernel_upscale_2x_lines(const int nthreads,
Dtype* src, const int src_h, const int src_w, Dtype* dst) {
const int src_spatial_dim = src_h * src_w;
const int dst_spatial_dim = src_spatial_dim * 4;
const int dst_h = src_h * 2;
const int dst_w = src_w * 2;
CUDA_KERNEL_LOOP(index, nthreads) {
int c_id = index / ((dst_h -2) * (dst_w -2));
int id_inside_c = index % ((dst_h -2) * (dst_w -2));
int dst_h_id = 1 + id_inside_c / (dst_w -2);
int dst_w_id = 1 + id_inside_c % (dst_w -2);
Dtype* src_p11 = src + c_id * src_spatial_dim +
(dst_h_id-1)/2 * src_w + (dst_w_id-1)/2;
Dtype* dst_p = dst + c_id * dst_spatial_dim +
dst_h_id * dst_w + dst_w_id;
IterpolateAction::template Do<Dtype, 1>(src_p11,
src_p11 + 1, src_p11 + src_w, src_p11 + src_w + 1,
dst_p, 256/4 + 128 * ((dst_w_id-1)%2),
256/4 + 128 * ((dst_h_id-1)%2));
}
}
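/*
 * upscale_2x_gpu: the 2x upscale is decomposed into four launches so that every
 * destination pixel is written exactly once: the four corners, the top/bottom
 * border rows, the left/right border columns, and the interior. The last two
 * arguments passed to IterpolateAction::Do appear to be fractional offsets in
 * 1/256 units (64 or 192 here), i.e. the interpolation weights for a 2x upscale.
 */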
template <typename IterpolateAction, typename Dtype>
void upscale_2x_gpu(Dtype* src_data, const int src_n, const int src_c,
const int src_h, const int src_w, Dtype* dst_data) {
int total_channel_num = src_n * src_c;
int dst_h = src_h * 2;
int dst_w = src_w * 2;
hipLaunchKernelGGL(( kernel_upscale_2x_corner<IterpolateAction, Dtype>) ,
dim3(CAFFE_GET_BLOCKS(total_channel_num * 4)),
dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, total_channel_num * 4, src_data,
src_h, src_w, dst_data);
if (dst_w -2 > 0) {
hipLaunchKernelGGL(( kernel_upscale_2x_border_line_horizontal<IterpolateAction, Dtype>) ,
dim3(CAFFE_GET_BLOCKS(total_channel_num * (dst_w -2) * 2)),
dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, total_channel_num * (dst_w -2) * 2,
src_data, src_h, src_w, dst_data);
}
if (dst_h -2 > 0) {
hipLaunchKernelGGL(( kernel_upscale_2x_border_line_vertical<IterpolateAction, Dtype>) ,
dim3(CAFFE_GET_BLOCKS(total_channel_num * (dst_h -2) * 2)),
dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, total_channel_num * (dst_h -2) * 2,
src_data, src_h, src_w, dst_data);
}
if (dst_w -2 > 0 && dst_h -2 > 0) {
hipLaunchKernelGGL(( kernel_upscale_2x_lines<IterpolateAction, Dtype>) ,
dim3(CAFFE_GET_BLOCKS(total_channel_num * (dst_h -2) * (dst_w -2))),
dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, total_channel_num * (dst_h -2) * (dst_w -2),
src_data, src_h, src_w, dst_data);
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void Blob2xUpscaler<Dtype>::Forward_gpu(const Blob<Dtype>& src_blob,
Blob<Dtype>& dst_blob) {
Blob2xUpscaler<Dtype>::Check(src_blob, dst_blob);
int last_dim = src_blob.shape().size() - 1;
int total_channel_num = src_blob.count(0, src_blob.shape().size() - 2);
int src_spatial_dim = src_blob.count(last_dim - 1);
int dst_spatial_dim = dst_blob.count(last_dim - 1);
int src_h = src_blob.shape(last_dim - 1);
int src_w = src_blob.shape(last_dim);
Dtype* src_data = const_cast<Dtype*>(src_blob.gpu_data());
Dtype* dst_data = dst_blob.mutable_gpu_data();
upscale_2x_gpu<PointInterpolateForward, Dtype>(src_data, total_channel_num,
1, src_h, src_w, dst_data);
}
template <typename Dtype>
void Blob2xUpscaler<Dtype>::Backward_gpu(const Blob<Dtype>& dst_blob,
Blob<Dtype>& src_blob) {
Blob2xUpscaler<Dtype>::Check(src_blob, dst_blob);
int last_dim = src_blob.shape().size() - 1;
int total_channel_num = src_blob.count(0, src_blob.shape().size() - 2);
int src_spatial_dim = src_blob.count(last_dim - 1);
int dst_spatial_dim = dst_blob.count(last_dim - 1);
int src_h = src_blob.shape(last_dim - 1);
int src_w = src_blob.shape(last_dim);
Dtype* dst_data = const_cast<Dtype*>(dst_blob.gpu_diff());
Dtype* src_data = src_blob.mutable_gpu_diff();
upscale_2x_gpu<PointInterpolateBackward, Dtype>(src_data, total_channel_num,
1, src_h, src_w, dst_data);
}
template void Blob2xUpscaler<float>::Forward_gpu(const Blob<float>& src_blob,
Blob<float>& dst_blob);
template void Blob2xUpscaler<double>::Forward_gpu(const Blob<double>& src_blob,
Blob<double>& dst_blob);
template void Blob2xUpscaler<float>::Backward_gpu(const Blob<float>& dst_blob,
Blob<float>& src_blob);
template void Blob2xUpscaler<double>::Backward_gpu(const Blob<double>& dst_blob,
Blob<double>& src_blob);
} // namespace caffe
|
d87b85da2ea150ce3302b4b11f17343fa6b02cb1.cu
|
/*
* upscale.cu
*
* Author: Alan_Huang
*/
#include "caffe/util/upscale.hpp"
#include "device_functions.h"
namespace caffe {
/*
* nthreads should be n*c*4
*/
template <typename IterpolateAction, typename Dtype>
__global__ void kernel_upscale_2x_corner(const int nthreads, Dtype* src,
const int src_h, const int src_w, Dtype* dst) {
const int src_spatial_dim = src_h * src_w;
const int dst_spatial_dim = src_spatial_dim * 4;
const int dst_h = src_h * 2;
const int dst_w = src_w * 2;
int dst_offset[] = {0, dst_w - 1, dst_w * (dst_h -1), dst_w * dst_h - 1};
int src_offset[] = {0, src_w - 1, src_w * (src_h -1), src_w * src_h - 1};
CUDA_KERNEL_LOOP(index, nthreads) {
int c_id = index / 4;
IterpolateAction::DoEltwise(src,
c_id * src_spatial_dim + src_offset[index % 4],
dst,
c_id * dst_spatial_dim + dst_offset[index % 4]);
}
}
/*
* upscale_all_border_horizontal_lines.
* nthreads should be n*c*(dst_w -2) * 2
*/
template <typename IterpolateAction, typename Dtype>
__global__ void kernel_upscale_2x_border_line_horizontal(const int nthreads,
Dtype* src, const int src_h, const int src_w, Dtype* dst) {
const int src_spatial_dim = src_h * src_w;
const int dst_spatial_dim = src_spatial_dim * 4;
const int dst_h = src_h * 2;
const int dst_w = src_w * 2;
int dst_offset[] = {0, dst_w * (dst_h -1)};
int src_offset[] = {0, src_w * (src_h -1)};
__shared__ Dtype zero;
CUDA_KERNEL_LOOP(index, nthreads) {
int c_id = index / ((dst_w -2) * 2);
int line_id = (index / (dst_w -2)) % 2;
int dst_w_id = 1 + (index % (dst_w -2));
Dtype* src_p11 = src + c_id * src_spatial_dim +
src_offset[line_id] + (dst_w_id-1)/2;
Dtype* dst_p = dst + c_id * dst_spatial_dim +
dst_offset[line_id] + dst_w_id;
IterpolateAction::template Do<Dtype, 1>(src_p11,
src_p11 + 1, &zero, &zero, dst_p,
256/4 + 128 * ((dst_w_id-1)%2), 0);
}
}
/*
 * upscale_all_border_vertical_lines.
* nthreads should be n*c*(dst_h -2) * 2
*/
template <typename IterpolateAction, typename Dtype>
__global__ void kernel_upscale_2x_border_line_vertical(const int nthreads,
Dtype* src, const int src_h, const int src_w, Dtype* dst) {
const int src_spatial_dim = src_h * src_w;
const int dst_spatial_dim = src_spatial_dim * 4;
const int dst_h = src_h * 2;
const int dst_w = src_w * 2;
int dst_offset[] = {0, dst_w - 1};
int src_offset[] = {0, src_w - 1};
__shared__ Dtype zero ;
CUDA_KERNEL_LOOP(index, nthreads) {
int c_id = index / ((dst_h -2) * 2);
int id_inside_c = index % ((dst_h -2) * 2);
int dst_h_id = id_inside_c / 2 + 1;
int col_id = id_inside_c % 2 ;
Dtype* src_p11 = src + c_id * src_spatial_dim +
src_offset[col_id] + (dst_h_id-1)/2 * src_w;
Dtype* dst_p = dst + c_id * dst_spatial_dim +
dst_offset[col_id] + dst_h_id * dst_w;
IterpolateAction::template Do<Dtype, 1>(src_p11,
&zero, src_p11 + src_w, &zero, dst_p,
0, 256/4 + 128 * ((dst_h_id-1)%2));
}
}
/*
 * upscale_all_interior_lines.
* nthreads should be n*c*(dst_h -2) * (dst_w -2)
*/
template <typename IterpolateAction, typename Dtype>
__global__ void kernel_upscale_2x_lines(const int nthreads,
Dtype* src, const int src_h, const int src_w, Dtype* dst) {
const int src_spatial_dim = src_h * src_w;
const int dst_spatial_dim = src_spatial_dim * 4;
const int dst_h = src_h * 2;
const int dst_w = src_w * 2;
CUDA_KERNEL_LOOP(index, nthreads) {
int c_id = index / ((dst_h -2) * (dst_w -2));
int id_inside_c = index % ((dst_h -2) * (dst_w -2));
int dst_h_id = 1 + id_inside_c / (dst_w -2);
int dst_w_id = 1 + id_inside_c % (dst_w -2);
Dtype* src_p11 = src + c_id * src_spatial_dim +
(dst_h_id-1)/2 * src_w + (dst_w_id-1)/2;
Dtype* dst_p = dst + c_id * dst_spatial_dim +
dst_h_id * dst_w + dst_w_id;
IterpolateAction::template Do<Dtype, 1>(src_p11,
src_p11 + 1, src_p11 + src_w, src_p11 + src_w + 1,
dst_p, 256/4 + 128 * ((dst_w_id-1)%2),
256/4 + 128 * ((dst_h_id-1)%2));
}
}
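/*
 * upscale_2x_gpu: the 2x upscale is decomposed into four launches so that every
 * destination pixel is written exactly once: the four corners, the top/bottom
 * border rows, the left/right border columns, and the interior. The last two
 * arguments passed to IterpolateAction::Do appear to be fractional offsets in
 * 1/256 units (64 or 192 here), i.e. the interpolation weights for a 2x upscale.
 */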
template <typename IterpolateAction, typename Dtype>
void upscale_2x_gpu(Dtype* src_data, const int src_n, const int src_c,
const int src_h, const int src_w, Dtype* dst_data) {
int total_channel_num = src_n * src_c;
int dst_h = src_h * 2;
int dst_w = src_w * 2;
kernel_upscale_2x_corner<IterpolateAction, Dtype> <<<
CAFFE_GET_BLOCKS(total_channel_num * 4),
CAFFE_CUDA_NUM_THREADS >>> (total_channel_num * 4, src_data,
src_h, src_w, dst_data);
if (dst_w -2 > 0) {
kernel_upscale_2x_border_line_horizontal<IterpolateAction, Dtype> <<<
CAFFE_GET_BLOCKS(total_channel_num * (dst_w -2) * 2),
CAFFE_CUDA_NUM_THREADS >>> (total_channel_num * (dst_w -2) * 2,
src_data, src_h, src_w, dst_data);
}
if (dst_h -2 > 0) {
kernel_upscale_2x_border_line_vertical<IterpolateAction, Dtype> <<<
CAFFE_GET_BLOCKS(total_channel_num * (dst_h -2) * 2),
CAFFE_CUDA_NUM_THREADS >>> (total_channel_num * (dst_h -2) * 2,
src_data, src_h, src_w, dst_data);
}
if (dst_w -2 > 0 && dst_h -2 > 0) {
kernel_upscale_2x_lines<IterpolateAction, Dtype> <<<
CAFFE_GET_BLOCKS(total_channel_num * (dst_h -2) * (dst_w -2)),
CAFFE_CUDA_NUM_THREADS >>> (total_channel_num * (dst_h -2) * (dst_w -2),
src_data, src_h, src_w, dst_data);
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void Blob2xUpscaler<Dtype>::Forward_gpu(const Blob<Dtype>& src_blob,
Blob<Dtype>& dst_blob) {
Blob2xUpscaler<Dtype>::Check(src_blob, dst_blob);
int last_dim = src_blob.shape().size() - 1;
int total_channel_num = src_blob.count(0, src_blob.shape().size() - 2);
int src_spatial_dim = src_blob.count(last_dim - 1);
int dst_spatial_dim = dst_blob.count(last_dim - 1);
int src_h = src_blob.shape(last_dim - 1);
int src_w = src_blob.shape(last_dim);
Dtype* src_data = const_cast<Dtype*>(src_blob.gpu_data());
Dtype* dst_data = dst_blob.mutable_gpu_data();
upscale_2x_gpu<PointInterpolateForward, Dtype>(src_data, total_channel_num,
1, src_h, src_w, dst_data);
}
template <typename Dtype>
void Blob2xUpscaler<Dtype>::Backward_gpu(const Blob<Dtype>& dst_blob,
Blob<Dtype>& src_blob) {
Blob2xUpscaler<Dtype>::Check(src_blob, dst_blob);
int last_dim = src_blob.shape().size() - 1;
int total_channel_num = src_blob.count(0, src_blob.shape().size() - 2);
int src_spatial_dim = src_blob.count(last_dim - 1);
int dst_spatial_dim = dst_blob.count(last_dim - 1);
int src_h = src_blob.shape(last_dim - 1);
int src_w = src_blob.shape(last_dim);
Dtype* dst_data = const_cast<Dtype*>(dst_blob.gpu_diff());
Dtype* src_data = src_blob.mutable_gpu_diff();
upscale_2x_gpu<PointInterpolateBackward, Dtype>(src_data, total_channel_num,
1, src_h, src_w, dst_data);
}
template void Blob2xUpscaler<float>::Forward_gpu(const Blob<float>& src_blob,
Blob<float>& dst_blob);
template void Blob2xUpscaler<double>::Forward_gpu(const Blob<double>& src_blob,
Blob<double>& dst_blob);
template void Blob2xUpscaler<float>::Backward_gpu(const Blob<float>& dst_blob,
Blob<float>& src_blob);
template void Blob2xUpscaler<double>::Backward_gpu(const Blob<double>& dst_blob,
Blob<double>& src_blob);
} // namespace caffe
|
3c491559cc22e16c2478a0da64c79e13d42301a9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Performs a finite difference heat flow
* simulation using conduction and convection.
* Uses CUDA to perform calculations on a GPGPU
*/
#define _USE_MATH_DEFINES
#ifdef _WIN32
#define NOMINMAX //FYI need to disable min/max macro in windows.h
#include <windows.h>
#endif
#ifdef DISPLAY
#ifdef __APPLE__
# include <OpenGL/gl.h>
# include <OpenGL/glu.h>
# include <GLUT/glut.h>
#else
# include <GL/GL.h>
# include <GL/GLU.h>
# include <GL/glut.h>
#endif
#endif
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <iomanip>
#include <limits>
#include <cmath>
/**
* Time_step (seconds/yr) divided by the product of density and heat capacity.
 * The value for the density is 2200 kg/m^3 and the heat capacity is 1000 J/kg K.
*/
#define QFAC 14.33 //Description is defined in the previous comment
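//Sanity check: 31,536,000 s/yr / (2200 kg/m^3 * 1000 J/(kg K)) ~= 14.33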
#define DTC 0.25 //
#define OUT_PRECISION 10 //Number of digits to print after the decimal place for floating point values
#define INDEX_WIDTH 2 //The number of characters to print and read for each conduction and convection code
#define REAL double //The precision of the model.
#define FREEMEM 100000000 //Amount of memory to leave free on the GPU in bytes
using std::cerr;
using std::cin;
using std::cout;
using std::endl;
using std::string;
using std::ofstream;
using std::ifstream;
using std::ostringstream;
using std::setw;
using std::right;
using std::left;
using std::fixed;
using std::scientific;
using std::setprecision;
using std::setfill;
using std::ios;
using std::numeric_limits;
using std::streamsize;
using std::max;
using std::flush;
void save_surfer();
void save_model_state();
void conduction();
void convection();
void PressEnterToContinue();
REAL find_max_temp_diff();
void update_moving_sources();
void find_loc_index(REAL x_loc, REAL y_loc, REAL z_loc, int *index);
//Cuda specific variables
dim3 dimBlock; //Block dimensions for the kernel call
dim3 dimGrid; //Grid dimensions for the kernel call
hipError_t error; //CUDA error variable
hipDeviceProp_t deviceProp; //CUDA device properties
//Conduction code specific variables
int *cond_codes; //The unmodified conduction codes as read from the input file
int *cond_hp_index; //The conduction index for the radioactive heat production array
int *cond_tc_index; //The conduction index for the thermal conductivity array
int *use_cond; //Flag to indicate if conduction occurs for a given cell
REAL DHF; //
//Device specific conduction variables
int *dev_cond_codes; //The unmodified conduction codes as read from the input file
int *dev_cond_hp_index; //The conduction index for the radioactive heat production array
int *dev_cond_tc_index; //The conduction index for the thermal conductivity array
int *dev_use_cond;
//Convection code specific variables
int *conv_codes; //The convection codes as read from the input file
int *conv_min_temp_index; //The convection index for the minimum temp for convection array
int *conv_direction; //The direction of convection following the direction matrix in the previous comment
int *conv_vel_index; //The convection index for the velocity array
int *conv_fluid_index; //The convection index for the fluid heat capacity array
int *conv_rock_index; //The convection index for the rock heat capacity array
int num_conv_loops; //The number of convection updates to perform per time step
REAL time_inc; //The amount of time increment per convection loop
//Device specific convection variables
int *dev_conv_codes; //The convection codes as read from the input file
int *dev_conv_min_temp_index; //The convection index for the minimum temp for convection array
int *dev_conv_direction; //The direction of convection following the direction matrix in the previous comment
int *dev_conv_vel_index; //The convection index for the velocity array
int *dev_conv_fluid_index; //The convection index for the fluid heat capacity array
int *dev_conv_rock_index; //The convection index for the rock heat capacity array
//File names
string source_filename; //The input files name with extension
string output_filename; //The output state files name with extension
string output_su_filename; //The output surfer files name with extension
//Input file variables
string title; //The title of the input file
int using_convection = -1; //Indicates if convection is being used
REAL *temp; //The current temperature array
int num_rows; //The number of rows for the simulation
int num_cols; //The number of columns for the simulation
int num_slices; //Total number of slices to form the 3d simulation (one 'slice' has dimension rows x columns)
REAL *dim_x; //The dimensions of each column in the x direction
REAL *dim_y; //The dimensions of each row in the y direction
REAL *dim_z; //The dimensions of each slice in the z direction
REAL *dist_x; //The distance from the origin to the center of a column for a given column index in the x direction
REAL *dist_y; //The distance from the origin to the center of a row for a given row index in the y direction
REAL *dist_z; //The distance from the origin to the center of a slice for a given slice index in the z direction
REAL max_dist_x; //The maximum x distance
REAL max_dist_y; //The maximum y distance
REAL max_dist_z; //The maximum z distance
REAL chf; //Constant heat flow at base of model in mW/m^2
REAL initial_time; //The initial starting time of the model
int num_hp; //The number of heat production values
int num_tcd; //The number of thermal conductivity difference values
int num_hcf; //The number of fluid heat capacity values
int num_hcr; //The number of rock heat capacity values
int num_mtc; //The number of minimum convection temperature values
int num_vel; //The number of convection velocities
REAL *heat_production_values; //The radioactive heat production values array used in conduction calculations
REAL *thermal_conduct_diff; //The thermal conductivity difference array used in conduction calculations
REAL *heat_capac_fluid; //The fluid heat capacity array used in convection calculations
REAL *heat_capac_rock; //The rock heat capacity array used in convection calculations
REAL *min_temp_conv; //The minimum temperature required for convection
REAL *vel; //The velocity array used for convection calculations
//Device specific variables
REAL *dev_temp; //The current temperature array
REAL *dev_dim_x; //The dimensions of each column in the x direction
REAL *dev_dim_y; //The dimensions of each row in the y direction
REAL *dev_dim_z; //The dimensions of each slice in the z direction
REAL *dev_dist_x; //The distance from the origin to the center of a column for a given column index in the x direction
REAL *dev_dist_y; //The distance from the origin to the center of a row for a given row index in the y direction
REAL *dev_dist_z; //The distance from the origin to the center of a slice for a given slice index in the z direction
REAL *dev_heat_production_values; //The radioactive heat production values array used in conduction calculations
REAL *dev_thermal_conduct_diff; //The thermal conductivity difference array used in conduction calculations
REAL *dev_heat_capac_fluid; //The fluid heat capacity array used in convection calculations
REAL *dev_heat_capac_rock; //The rock heat capacity array used in convection calculations
REAL *dev_min_temp_conv; //The minimum temperature required for convection
REAL *dev_vel; //The velocity array used for convection calculations
//Moving source variables
int using_moving_source = -1; //Indicates if a moving source is being used
int num_mvsrc = -1; //The number of moving sources
REAL *mvsrc_x; //The x location of the moving sources
REAL *mvsrc_y; //The y location of the moving sources
REAL *mvsrc_z; //The z location of the moving sources
REAL *mvsrc_offset_x; //The size of the moving sources in the x direction
REAL *mvsrc_offset_y; //The size of the moving sources in the y direction
REAL *mvsrc_offset_z; //The size of the moving sources in the z direction
REAL *mvsrc_vel_x; //The x component of the moving sources velocity vectors
REAL *mvsrc_vel_y; //The y component of the moving sources velocity vectors
REAL *mvsrc_vel_z; //The z component of the moving sources velocity vectors
REAL *mvsrc_accel_x; //The x component of the moving sources acceleration vectors
REAL *mvsrc_accel_y; //The y component of the moving sources acceleration vectors
REAL *mvsrc_accel_z; //The z component of the moving sources acceleration vectors
REAL *mvsrc_temp; //The temperature of the moving sources
int *mvsrc_valid; //Indicates if a moving source is valid
//Time specific variables
REAL sim_time; //The current time of the simulation
REAL tic; //Time variable used in convection calculations
REAL time_step = -1; //The amount of time that passes between each update of the simulation, time step
REAL run_time = -1; //The run time of the simulation
//Global variables
int save_state = -1; //Indicates if the model should save the current state at each screen update
int save_result = -1; //Indicates if the model should save the final result of the simulaiton
int use_tolerance = -1; //Indicates if the model should stop once a user specified tolerance is met for temperature change
REAL max_vel; //The maximum convection velocity of the velocity array
REAL min_row_dim; //The minimum y dimension of each cell of the simulation
REAL min_col_dim; //The minimum x dimension of each cell of the simulation
REAL min_slice_dim; // The minimum z dimension of each cell in the simulation
REAL thermal_time_constant; //The thermal time constant of the model, used in the selection of the run time
REAL *next_temp; //The next temperature array
REAL max_thermal_conduct_diff; //The maximum thermal conductivity difference
REAL min_thermal_conduct_diff; //The minimum thermal conductivity difference
int num_loops = -1; //The number of loops between screen updates
int su_num_width; //The number of characters for the slice number in the output surfer filenames
unsigned long long count = 0; //The current loop
REAL tolerance = -1; //The maximum difference required for the model to stop
REAL max_temp_diff; //The maximum temperature difference between the current and next temperature arrays
unsigned long long num_cells; //The number of cells in the simulation
REAL *dev_next_temp; //The next temperature array
/**
* Deallocates all allocated memory used by the program
*/
void deallocate_memory() {
//Deletes allocated memory
delete[] dim_x;
delete[] dim_y;
delete[] dim_z;
delete[] dist_x;
delete[] dist_y;
delete[] dist_z;
delete[] temp;
delete[] next_temp;
delete[] cond_codes;
delete[] cond_hp_index;
delete[] cond_tc_index;
delete[] use_cond;
delete[] heat_production_values;
delete[] thermal_conduct_diff;
if(using_convection == 1) {
delete[] conv_codes;
delete[] conv_min_temp_index;
delete[] conv_direction;
delete[] conv_vel_index;
delete[] conv_fluid_index;
delete[] conv_rock_index;
delete[] heat_capac_fluid;
delete[] heat_capac_rock;
delete[] min_temp_conv;
delete[] vel;
}
if(using_moving_source == 1) {
delete[] mvsrc_x;
delete[] mvsrc_y;
delete[] mvsrc_z;
delete[] mvsrc_offset_x;
delete[] mvsrc_offset_y;
delete[] mvsrc_offset_z;
delete[] mvsrc_vel_x;
delete[] mvsrc_vel_y;
delete[] mvsrc_vel_z;
delete[] mvsrc_accel_x;
delete[] mvsrc_accel_y;
delete[] mvsrc_accel_z;
delete[] mvsrc_temp;
delete[] mvsrc_valid;
}
}
void deallocate_cuda_memory() {
hipFree(dev_temp);
hipFree(dev_next_temp);
hipFree(dev_dim_x);
hipFree(dev_dim_y);
hipFree(dev_dim_z);
hipFree(dev_dist_x);
hipFree(dev_dist_y);
hipFree(dev_dist_z);
hipFree(dev_cond_codes);
hipFree(dev_cond_hp_index);
hipFree(dev_cond_tc_index);
hipFree(dev_use_cond);
hipFree(dev_heat_production_values);
hipFree(dev_thermal_conduct_diff);
if(using_convection) {
hipFree(dev_heat_capac_fluid);
hipFree(dev_heat_capac_rock);
hipFree(dev_min_temp_conv);
hipFree(dev_vel);
hipFree(dev_conv_codes);
hipFree(dev_conv_min_temp_index);
hipFree(dev_conv_direction);
hipFree(dev_conv_vel_index);
hipFree(dev_conv_fluid_index);
hipFree(dev_conv_rock_index);
}
}
#ifdef DISPLAY
//Display variables
int display_mode = -1;
int array_size;
REAL min_temp;
REAL max_temp;
REAL layer_min_temp;
REAL layer_max_temp;
float *color_field;
float transparency = 1.0f;
int current_slice = 0;
/**
* Parameters to control the camera angle so we can move where we're looking at
* the simulation from with the mouse. Kept for debugging.
*/
/*
int ox = 0;
int oy = 0;
int buttonState = 0;
float camera_trans[] = {0, -0.2, -10};
float camera_rot[] = {0, 0, 0};
float camera_trans_lag[] = {0, -0.2, -10};
float camera_rot_lag[] = {0, 0, 0};
const float inertia = 0.1f;
*/
/*
* Sets max and min temperature values for simulation
*/
void array_minmax() {
min_temp=temp[0][0][0];
max_temp=temp[0][0][0];
for(int k=0; k<num_slices; k++) {
for (int i=0; i<num_rows; i++) {
for(int j=0; j<num_cols; j++) {
if(temp[i][j][k]<min_temp)
min_temp = temp[i][j][k];
if(temp[i][j][k]>max_temp)
max_temp = temp[i][j][k];
}
}
}
}
/**
* Updates max_temp with the maximum temp of
* the current slice.
*/
void array_max() {
max_temp=temp[0][0][0];
for(int i = 0; i < num_rows; i++) {
for(int j = 0; j < num_cols; j++) {
if(temp[i][j][current_slice] > max_temp) {
max_temp = temp[i][j][current_slice];
}
}
}
}
/*
* 3D to 1D indexing
*/
static int POSITION(int x, int y) {
return (x*num_cols)+y;
}
/*
* Colormap algorithm reproduces Matlab's RGB "Jet" plate
* Concept based on: http://paulbourke.net/texture_colour/colourspace/ (11/21/12)
*/
void jet_color_set(int x, int y, int z) {
REAL current_temp = temp[x][y][z];
REAL delta_temp = max_temp - min_temp;
if(current_temp < min_temp)
current_temp = min_temp;
if(current_temp > max_temp)
current_temp = max_temp;
if(current_temp < (min_temp + 0.25 * delta_temp)) {
color_field[POSITION(x,y) * 3] = (GLfloat)0.0;
color_field[POSITION(x,y) * 3 + 1] = (GLfloat)(4*(current_temp - min_temp)/delta_temp);
color_field[POSITION(x,y) * 3 + 2] = (GLfloat)1.0;
}
else if(current_temp < (min_temp + 0.5 * delta_temp)) {
color_field[POSITION(x,y) * 3] = (GLfloat)0.0;
color_field[POSITION(x,y) * 3 + 1] = (GLfloat)1.0;
color_field[POSITION(x,y) * 3 + 2] = (GLfloat)(1.0 + 4 * (min_temp + 0.25 * delta_temp - current_temp) / delta_temp);
}
else if(current_temp < (min_temp + 0.75 * delta_temp)) {
color_field[POSITION(x,y) * 3] = (GLfloat)(4 * (current_temp - min_temp - 0.5 * delta_temp) / delta_temp);
color_field[POSITION(x,y) * 3 + 1] = (GLfloat)1.0;
color_field[POSITION(x,y) * 3 + 2] = (GLfloat)0.0;
}
else {
color_field[POSITION(x,y) * 3] = (GLfloat)1.0;
color_field[POSITION(x,y) * 3 + 1] = (GLfloat)(1.0 + 4 * (min_temp + 0.75 * delta_temp - current_temp) / delta_temp);
color_field[POSITION(x,y) * 3 + 2] = (GLfloat)0.0;
}
}
/*
* Draw temp surface via 1x1 faces. Dimensions constant for simplicity.
* Originally drew cubes from Robert Bergmans voxel display code.
*/
void draw_cube(int x, int y, int z) {
if(z == current_slice) {
transparency = 1.0f;
}
else {
transparency = 0.3f;
}
glBegin(GL_TRIANGLES);
//front
glColor4f( color_field[POSITION(x,y) * 3],
color_field[POSITION(x,y) * 3 + 1],
color_field[POSITION(x,y) * 3 + 2],
transparency);
glVertex3f(0.0f,0.0f,1.0f);//5
glVertex3f(1.0f,0.0f,1.0f);//6
glVertex3f(0.0f,-1.0f,1.0f);//8
glColor4f( color_field[POSITION(x,y) * 3],
color_field[POSITION(x,y) * 3 + 1],
color_field[POSITION(x,y) * 3 + 2],
transparency);
glVertex3f(0.0f,-1.0f,1.0f);//8
glVertex3f(1.0f,0.0f,1.0f);//6
glVertex3f(1.0f,-1.0f,1.0f);//7
//top
glColor4f( color_field[POSITION(x,y) * 3],
color_field[POSITION(x,y) * 3 + 1],
color_field[POSITION(x,y) * 3 + 2],
transparency);
glVertex3f(0.0f,0.0f,0.0f);//1
glVertex3f(1.0f,0.0f,0.0f);//2
glVertex3f(0.0f,0.0f,1.0f);//5
glColor4f( color_field[POSITION(x,y) * 3],
color_field[POSITION(x,y) * 3 + 1],
color_field[POSITION(x,y) * 3 + 2],
transparency);
glVertex3f(0.0f,0.0f,1.0f);//5
glVertex3f(1.0f,0.0f,0.0f);//2
glVertex3f(1.0f,0.0f,1.0f);//6
//QUADS code left in case we need it later
/*
glBegin(GL_QUADS);
//front
glColor4f( color_field[POSITION(x,y) * 3],
color_field[POSITION(x,y) * 3 + 1],
color_field[POSITION(x,y) * 3 + 2],
transparency);
glVertex3f(0.0f,0.0f,1.0f);//5
glVertex3f(1.0f,0.0f,1.0f);//6
glVertex3f(1.0f,-1.0f,1.0f);//7
glVertex3f(0.0f,-1.0f,1.0f);//8
//top
glColor4f( color_field[POSITION(x,y) * 3],
color_field[POSITION(x,y) * 3 + 1],
color_field[POSITION(x,y) * 3 + 2],
transparency);
glVertex3f(0.0f,0.0f,0.0f);//1
glVertex3f(1.0f,0.0f,0.0f);//2
glVertex3f(1.0f,0.0f,1.0f);//6
glVertex3f(0.0f,0.0f,1.0f);//5
*//*//left
glColor4f( color_field[POSITION(x,y) * 3],
color_field[POSITION(x,y) * 3 + 1],
color_field[POSITION(x,y) * 3 + 2],
transparency);
glVertex3f(0.0f,0.0f,0.0f);//1
glVertex3f(0.0f,0.0f,1.0f);//5
glVertex3f(0.0f,-1.0f,1.0f);//8
glVertex3f(0.0f,-1.0f,0.0f);//4
//right
glColor4f( color_field[POSITION(x,y) * 3],
color_field[POSITION(x,y) * 3 + 1],
color_field[POSITION(x,y) * 3 + 2],
transparency);
glVertex3f(1.0f,0.0f,0.0f);//2
glVertex3f(1.0f,0.0f,1.0f);//6
glVertex3f(1.0f,-1.0f,1.0f);//7
glVertex3f(1.0f,-1.0f,0.0f);//3
//bottom
glColor4f( color_field[POSITION(x,y) * 3],
color_field[POSITION(x,y) * 3 + 1],
color_field[POSITION(x,y) * 3 + 2],
transparency);
glVertex3f(0.0f,-1.0f,0.0f);//4
glVertex3f(1.0f,-1.0f,0.0f);//3
glVertex3f(1.0f,-1.0f,1.0f);//7
glVertex3f(0.0f,-1.0f,1.0f);//8
//back
glColor4f( color_field[POSITION(x,y) * 3],
color_field[POSITION(x,y) * 3 + 1],
color_field[POSITION(x,y) * 3 + 2],
transparency);
glVertex3f(0.0f,0.0f,0.0f);//1
glVertex3f(1.0f,0.0f,0.0f);//2
glVertex3f(1.0f,-1.0f,0.0f);//3
glVertex3f(0.0f,-1.0f,0.0f);//4
*/
glEnd();
}
/*
 * Draws all HUD/overlay graphics. The quads for the temperature scale are hardcoded
 * to the jet color map; any new color map will require changes here.
 */
void displayOverlay(){
int windowWidth = glutGet(GLUT_WINDOW_WIDTH);
int windowHeight = glutGet(GLUT_WINDOW_HEIGHT);
glMatrixMode( GL_PROJECTION );
glPushMatrix();
glLoadIdentity();
glOrtho(0.0f,windowWidth,windowHeight,0.0f,0.0f,1.0f);
glMatrixMode( GL_MODELVIEW );
glPushMatrix();
glLoadIdentity();
glBegin( GL_QUADS );
glColor3f( 0.0f, 0.0f, 1.0f );
glVertex2f( (GLfloat)(windowWidth/2-75), 20.0f );
glVertex2f( (GLfloat)(windowWidth/2-45), 20.0f );
glVertex2f( (GLfloat)(windowWidth/2-45), 50.0f );
glVertex2f( (GLfloat)(windowWidth/2-75), 50.0f );
glColor3f( 0.0f, 1.0f, 1.0f );
glVertex2f( (GLfloat)(windowWidth/2-45), 20.0f );
glVertex2f( (GLfloat)(windowWidth/2-15), 20.0f );
glVertex2f( (GLfloat)(windowWidth/2-15), 50.0f );
glVertex2f( (GLfloat)(windowWidth/2-45), 50.0f );
glColor3f( 0.0f, 1.0f, 0.0f );
glVertex2f( (GLfloat)(windowWidth/2-15), 20.0f );
glVertex2f( (GLfloat)(windowWidth/2+15), 20.0f );
glVertex2f( (GLfloat)(windowWidth/2+15), 50.0f );
glVertex2f( (GLfloat)(windowWidth/2-15), 50.0f );
glColor3f( 1.0f, 1.0f, 0.0f );
glVertex2f( (GLfloat)(windowWidth/2+15), 20.0f );
glVertex2f( (GLfloat)(windowWidth/2+45), 20.0f );
glVertex2f( (GLfloat)(windowWidth/2+45), 50.0f );
glVertex2f( (GLfloat)(windowWidth/2+15), 50.0f );
glColor3f( 1.0f, 0.0f, 0.0f );
glVertex2f( (GLfloat)(windowWidth/2+45), 20.0f );
glVertex2f( (GLfloat)(windowWidth/2+75), 20.0f );
glVertex2f( (GLfloat)(windowWidth/2+75), 50.0f );
glVertex2f( (GLfloat)(windowWidth/2+45), 50.0f );
glEnd();
glPopMatrix();
glPushMatrix();
glLoadIdentity();
ostringstream str1;
str1 << "Min Temp < > Max Temp";
glColor3f(1.0f, 1.0f, 1.0f);
glRasterPos2f((GLfloat)(windowWidth/2-150),35.0f);
glutBitmapString(GLUT_BITMAP_HELVETICA_12, (const unsigned char *)str1.str().c_str());
str1.str("");
str1.clear();
str1 << setw(4) << min_temp;
glRasterPos2f((GLfloat)(windowWidth/2-100), 65.0f);
glutBitmapString(GLUT_BITMAP_HELVETICA_12, (const unsigned char *)str1.str().c_str());
str1.str("");
str1.clear();
str1 << setw(4) << (max_temp+min_temp)/2;
glRasterPos2f((GLfloat)(windowWidth/2-20), 65.0f);
glutBitmapString(GLUT_BITMAP_HELVETICA_12, (const unsigned char *)str1.str().c_str());
str1.str("");
str1.clear();
str1 << setw(4) << max_temp;
glRasterPos2f((GLfloat)(windowWidth/2+60), 65.0f);
glutBitmapString(GLUT_BITMAP_HELVETICA_12, (const unsigned char *)str1.str().c_str());
str1.str("");
str1.clear();
glPopMatrix();
glPushMatrix();
glLoadIdentity();
ostringstream str;
str << "Time Interval:" << endl;
if(using_convection) {
str << "Conv. Time Interval:" << endl;
str << "Num Conv. Loops:" << endl;
}
str << endl << "Loop Total:" << endl;
str << "Sim Time:" << endl;
str << "Cum. Sim Time:" << endl << endl;
str << "Model Dimensions:" << endl;
str << "Current Slice:" << endl;
str << "CHF:" << endl;
glColor3f(1.0f, 1.0f, 1.0f);
glRasterPos2f(10.0f,(GLfloat)(windowHeight*3.0/4.0));
glutBitmapString(GLUT_BITMAP_HELVETICA_12, (const unsigned char *)str.str().c_str());
str.str("");
str.clear();
str << setprecision(4) << scientific;
str << time_step << endl;
if(using_convection) {
str << time_inc << endl;
str << num_conv_loops << endl;
}
str << endl << count << endl;
str << sim_time << endl;
str << initial_time + sim_time << endl << endl;
str << num_rows << " X " << num_cols << " X " << num_slices << endl;
str << current_slice+1 << endl;
str << chf << endl;
glColor3f(1.0f, 1.0f, 1.0f);
glRasterPos2f(150.0f,(GLfloat)(windowHeight*3.0/4.0));
glutBitmapString(GLUT_BITMAP_HELVETICA_12, (const unsigned char *)str.str().c_str());
glPopMatrix();
glMatrixMode( GL_PROJECTION );
glPopMatrix();
}
/*
 * Helper function called from display3D. Broken out for readability.
 */
void display_helper() {
array_max();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// Handle the camera angle. Maintained for debugging
/*
for (int c = 0; c < 3; ++c)
{
camera_trans_lag[c] += (camera_trans[c] - camera_trans_lag[c]) * inertia;
camera_rot_lag[c] += (camera_rot[c] - camera_rot_lag[c]) * inertia;
}
glTranslatef(camera_trans_lag[0], camera_trans_lag[1], camera_trans_lag[2]);
glRotatef(camera_rot_lag[0], 1.0, 0.0, 0.0);
glRotatef(camera_rot_lag[1], 0.0, 1.0, 0.0);
*/
//Draw the boundary lines
glBegin(GL_LINES);
glColor3f(1.0, 1.0, 1.0);
glVertex3f(0.0f,0.0f,0.0f);
glVertex3f((GLfloat)num_cols,0.0f,0.0f);
glVertex3f((GLfloat)num_cols,0.0f,0.0f);
glVertex3f((GLfloat)num_cols,(GLfloat)-num_rows,0.0f);
glVertex3f((GLfloat)num_cols,(GLfloat)-num_rows,0.0f);
glVertex3f(0.0f,(GLfloat)-num_rows,0.0f);
glVertex3f(0.0f,(GLfloat)-num_rows,0.0f);
glVertex3f(0.0f,0.0f,0.0f);
glVertex3f(0.0f,0.0f,(GLfloat)num_slices);
glVertex3f((GLfloat)num_cols,0.0f,(GLfloat)num_slices);
glVertex3f((GLfloat)num_cols,0.0f,(GLfloat)num_slices);
glVertex3f((GLfloat)num_cols,(GLfloat)-num_rows,(GLfloat)num_slices);
glVertex3f((GLfloat)num_cols,(GLfloat)-num_rows,(GLfloat)num_slices);
glVertex3f(0.0f,(GLfloat)-num_rows,(GLfloat)num_slices);
glVertex3f(0.0f,(GLfloat)-num_rows,(GLfloat)num_slices);
glVertex3f(0.0f,0.0f,(GLfloat)num_slices);
glVertex3f(0.0f,0.0f,0.0f);
glVertex3f(0.0f,0.0f,(GLfloat)num_slices);
glVertex3f((GLfloat)num_cols,0.0f,0.0f);
glVertex3f((GLfloat)num_cols,0.0f,(GLfloat)num_slices);
glVertex3f((GLfloat)num_cols,(GLfloat)-num_rows,0.0f);
glVertex3f((GLfloat)num_cols,(GLfloat)-num_rows,(GLfloat)num_slices);
glVertex3f(0.0f,(GLfloat)-num_rows,0.0f);
glVertex3f(0.0f,(GLfloat)-num_rows,(GLfloat)num_slices);
glEnd();
for(int i=0; i<num_rows; i++) {
for (int j=0; j<num_cols; j++) {
glPushMatrix();
glTranslatef((GLfloat)j,(GLfloat)-i,(GLfloat)current_slice);
jet_color_set(i,j,current_slice);
draw_cube(i,j,current_slice);
glPopMatrix();
}
}
displayOverlay();
glutSwapBuffers();
glutPostRedisplay();
}
/*
 * Main simulation step invoked during display. Makes the required simulation calls
 * (e.g. convection and conduction) and calls the display helper function.
 */
void display3D() {
//Displays status information for the current loop
if(count%num_loops == 0) {
if(use_tolerance == 0) {
cout << setw(15) << count << setw(20) << fixed << setprecision(5) << sim_time << setw(20) << initial_time + sim_time << endl;
}
else {
cout << setw(15) << count << setw(20) << fixed << setprecision(5) << sim_time << setw(20) << initial_time + sim_time << setw(20) << max_temp_diff << endl;
}
//Saves the current state of the simulation if the save_state flag is set
if(save_state) {
save_model_state();
}
display_helper();
}
if(sim_time <= run_time) {
//Performs convection updates if the current simulation is using convection
if(using_convection) {
convection();
}
//Performs conduction calculations
conduction();
//Increments the simulation time and loop count
sim_time += time_step;
count++;
if(use_tolerance == 1) {
max_temp_diff = find_max_temp_diff();
if(max_temp_diff < tolerance) {
cout << "Maximum temperature change below the tolerance, stoping the simulation" << endl;
//Saves the final result of the simulation
if(save_state == 1 || save_result == 1) {
save_model_state();
}
save_surfer();
cout << endl << "Simulation Complete" << endl;
delete[] color_field;
deallocate_memory();
glutLeaveMainLoop();
}
}
//Updates the moving source
if(using_moving_source == 1) {
update_moving_sources();
}
}
else {
//Saves the final result of the simulation
if(save_state == 1 || save_result == 1) {
save_model_state();
}
save_surfer();
cout << endl << "Simulation Complete" << endl;
delete[] color_field;
deallocate_memory();
glutLeaveMainLoop();
}
glutPostRedisplay();
}
/*
* This captures information when the mouse buttons are pressed.
* Maintained for debugging.
*/
/*
void mouse_button(int button, int state, int x, int y) {
int mods;
if (state == GLUT_DOWN)
buttonState |= 1<<button;
else if (state == GLUT_UP)
buttonState = 0;
mods = glutGetModifiers();
if (mods & GLUT_ACTIVE_SHIFT)
{
buttonState = 2;
}
else if (mods & GLUT_ACTIVE_CTRL)
{
buttonState = 3;
}
ox = x; oy = y;
glutPostRedisplay();
}
*/
/*
* This captures mouse motion information.
* Maintained for debugging
*/
/*
void mouse_move(int x, int y) {
float dx = (float)(x - ox);
float dy = (float)(y - oy);
if (buttonState == 3)
{
// left+middle = zoom
camera_trans[2] += (dy / 100.0f) * 0.5f * fabs(camera_trans[2]);
}
else if (buttonState & 2)
{
// middle = translate
camera_trans[0] += dx / 10.0f;
camera_trans[1] -= dy / 10.0f;
}
else if (buttonState & 1)
{
// left = rotate
camera_rot[0] += dy / 5.0f;
camera_rot[1] += dx / 5.0f;
}
ox = x; oy = y;
glutPostRedisplay();
}
*/
/*
*Standard keyboard character control
*/
void keyboard(unsigned char key, int x, int y) {
switch(key) {
case '-':
if(current_slice > 0) {
current_slice--;
//camera_trans[1]-=0.5;//For camera mouse control, above
//camera_trans[2]+=1.0;
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60, 1.77777f, 1.0, 20000.0);
//gluLookAt(num_cols/2.0,num_rows*0.1,num_rows+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
if(num_rows > num_cols) {
gluLookAt(num_cols/2.0,num_rows*0.1,num_rows+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
}
else {
gluLookAt(num_cols/2.0,num_rows*0.1,num_cols+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
}
}
break;
case '+':
if(current_slice < num_slices-1) {
current_slice++;
//camera_trans[1]+=0.5;//For camera mouse control, above
//camera_trans[2]-=1.0;
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60, 1.77777f, 1.0, 20000.0);
//gluLookAt(num_cols/2.0,num_rows*0.1,num_rows+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
if(num_rows > num_cols) {
gluLookAt(num_cols/2.0,num_rows*0.1,num_rows+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
}
else {
gluLookAt(num_cols/2.0,num_rows*0.1,num_cols+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
}
}
break;
case 'x':
exit(0);
default:
break;
}
display_helper();
}
/*
* Special keyboard control for arrows
*/
void keyboardSpecial(int key, int x, int y) {
switch(key) {
case GLUT_KEY_UP:
if(current_slice > 0) {
current_slice--;
//camera_trans[1]-=0.5;
//camera_trans[2]+=1.0;
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60, 1.77777f, 1.0, 20000.0);
//gluLookAt(num_cols/2.0,num_rows*0.1,num_rows+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
if(num_rows > num_cols) {
gluLookAt(num_cols/2.0,num_rows*0.1,num_rows+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
}
else {
gluLookAt(num_cols/2.0,num_rows*0.1,num_cols+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
}
}
break;
case GLUT_KEY_DOWN:
if(current_slice < num_slices-1) {
current_slice++;
//camera_trans[1]+=0.5;
//camera_trans[2]-=1.0;
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60, 1.77777f, 1.0, 20000.0);
//gluLookAt(num_cols/2.0,num_rows*0.1,num_rows+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
if(num_rows > num_cols) {
gluLookAt(num_cols/2.0,num_rows*0.1,num_rows+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
}
else {
gluLookAt(num_cols/2.0,num_rows*0.1,num_cols+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
}
}
break;
default:
break;
}
display_helper();
}
#endif
/**
* Clears the cin buffer
*/
void clear_cin() {
cin.clear();
cin.ignore(numeric_limits <streamsize> ::max(), '\n' );
}
/**
* This function waits for the user to hit enter before continuing
*/
void PressEnterToContinue() {
cout << "Press ENTER to continue... " << flush;
clear_cin();
}
/**
* Swaps the temp arrays
*/
void swap_temp_array() {
REAL *tmp;
tmp = temp;
temp = next_temp;
next_temp = tmp;
}
void swap_temp_array_cuda() {
REAL *tmp;
tmp = dev_temp;
dev_temp = dev_next_temp;
dev_next_temp = tmp;
}
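/*
 * Sketch of the intended ping-pong usage of the two device buffers (illustrative only;
 * the real calls live in the conduction/convection wrappers further below):
 *
 *   hipLaunchKernelGGL(conduction_kernel, dim3(dimGrid), dim3(dimBlock), 0, 0,
 *                      ..., dev_temp, dev_next_temp, ...); // reads dev_temp, writes dev_next_temp
 *   hipDeviceSynchronize();
 *   swap_temp_array_cuda(); // dev_next_temp becomes the current field for the next step
 *
 * Swapping pointers avoids copying the whole temperature field every time step.
 */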
/**
* Loads the input file into program memory and allocates
* necessary memory to store the input variables
*/
void load_file() {
ifstream source_file; //Input file stream
string temp_str;
ostringstream str_conv;
//Ask for the input file names and displays an error message
//if the file does not exist
do {
cout << "Input File Name: ";
cin >> source_filename;
source_file.open(source_filename.c_str(),ios::in);
if(!source_file.is_open()) {
cout << "File Not found!" << endl;
}
} while(!source_file.is_open());
//Asks the user if the state of the model should be saved every screen update
cout << endl << "To save the state of the model every screen update enter 1, otherwise 0: ";
while(!(cin >> save_state) || save_state < 0 || save_state > 1) {
clear_cin();
cout << "Incorrect input, to save the state of the model enter 1, else 0: ";
}
if(save_state == 0) {
//Asks the user if the final result of the model should be saved
cout << endl << "To save the final result of the model enter 1, otherwise 0: ";
while(!(cin >> save_result) || save_result < 0 || save_result > 1) {
clear_cin();
cout << "Incorrect input, to save the final result of the model enter 1, else 0: ";
}
}
//Ask for the state filename if the user specified that the file should be saved
if(save_state == 1 || save_result == 1) {
cout << "Output filename: ";
cin >> output_filename;
}
//Asks for the DSAA surfer grid filename
cout << "Surfer filename: ";
while(!(cin >> output_su_filename) || output_su_filename.length() < 5) {
clear_cin();
cout << "Please enter a filename at least 5 characters in length: ";
}
//Loads the input file
cout << endl << endl << "Loading Input File";
//Retrieves the simulation parameters from the input file
source_file >> num_rows >> num_cols >> num_slices >> using_convection;
source_file >> chf >> initial_time;
getline(source_file,title);
getline(source_file,title);
//Calculates the number of cells in the simulation
num_cells = num_rows*num_cols*num_slices;
//Calculates the dimensions of the grid and blocks
if(num_cells <= deviceProp.maxThreadsDim[0]) {
dimBlock.x = num_cells;
dimGrid.x = 1;
}
else {
dimBlock.x = deviceProp.maxThreadsDim[0];
dimGrid.x = (int)(ceil(num_cells/(REAL)deviceProp.maxThreadsDim[0]));
}
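//Illustrative sizing example (numbers not from any input): a 100 x 100 x 10 model has
//num_cells = 100000; with maxThreadsDim[0] = 1024 this yields dimBlock.x = 1024 and
//dimGrid.x = ceil(100000/1024.0) = 98 blocks, so every cell gets one thread and the
//kernels discard the few surplus threads via their id < num_cells guard.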
//Calculates the amount of memory to be used by the program
unsigned long long total_mem_used = num_rows*num_cols*num_slices*(2*sizeof(REAL) + 4*sizeof(int)) + sizeof(REAL)*2*(num_rows+num_cols+num_slices);
if(using_convection) {
total_mem_used += num_rows*num_cols*num_slices*6*sizeof(int);
}
//Exits the program if the estimated amount of memory exceeds the amount of global memory
cout << endl << endl << "Estimated amount of memory usage: " << total_mem_used << endl;
if(total_mem_used > deviceProp.totalGlobalMem-FREEMEM) {
cout << "Simulation exceeds global memory limits of GPU, Exiting Program!" << endl;
exit(1);
}
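//Illustrative estimate (assuming REAL is an 8-byte double): the same 100 x 100 x 10
//model needs 100000*(2*8 + 4*4) = 3.2 MB for the conduction arrays plus a few KB for
//the dimension and distance vectors, and roughly 100000*6*4 = 2.4 MB more when
//convection is enabled, which is well below typical GPU global memory.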
//Checks the total and used amount of device global memory before allocation
size_t free_memory; //Free memory on the device
size_t total_memory; //Total memory on the device
error = hipMemGetInfo(&free_memory, &total_memory); //Retrieves the memory information for the device
if(error != hipSuccess) {
cerr << endl << "Error while getting memory information" << endl;
cerr << error << ":" << hipGetErrorString(error) << endl;
cerr << "Exiting the Program" << endl;
exit(0);
}
cout << "Free memory: "<< (unsigned int)free_memory << ", total memory: "<< (unsigned int)total_memory<<" (before initialization)" << endl;
//displays parameters of the input file
cout << endl << endl << "Total Number of Cells = " << num_cells << endl;
cout << "Number of rows = " << num_rows << endl;
cout << "Number of cols = " << num_cols << endl;
cout << "Number of slices = " << num_slices << endl;
if(using_convection == 1) {
cout << "Using convection" << endl;
}
else {
cout << "No Convection" << endl;
}
cout << endl << "Constant Heat Flow at Base of Model = " << chf << "mW M^2" << endl;
chf *= 0.001;
cout << "Model time elapsed = " << initial_time << " Years" << endl << endl;
//Calculates the number of characters for the surfer file index
str_conv << num_slices;
su_num_width = str_conv.str().length();
//Allocates memory for the conduction variables based on the previously read in simulation
//parameters
dim_x = new REAL[num_cols];
dim_y = new REAL[num_rows];
dim_z = new REAL[num_slices];
dist_x = new REAL[num_cols];
dist_y = new REAL[num_rows];
dist_z = new REAL[num_slices];
temp = new REAL[num_rows*num_cols];
next_temp = new REAL[num_rows*num_cols];
cond_codes = new int[num_rows*num_cols];
cond_hp_index = new int[num_rows*num_cols];
cond_tc_index = new int[num_rows*num_cols];
use_cond = new int[num_rows*num_cols];
//Allocates conduction specific variables in device memory
error = hipMalloc((void **) &dev_dim_x,num_cols*sizeof(REAL));
error = hipMalloc((void **) &dev_dim_y,num_rows*sizeof(REAL));
error = hipMalloc((void **) &dev_dim_z,num_slices*sizeof(REAL));
error = hipMalloc((void **) &dev_dist_x,num_cols*sizeof(REAL));
error = hipMalloc((void **) &dev_dist_y,num_rows*sizeof(REAL));
error = hipMalloc((void **) &dev_dist_z,num_slices*sizeof(REAL));
error = hipMalloc((void **) &dev_temp,num_cols*num_rows*num_slices*sizeof(REAL));
error = hipMalloc((void **) &dev_next_temp,num_cols*num_rows*num_slices*sizeof(REAL));
error = hipMalloc((void **) &dev_cond_codes,num_cols*num_rows*num_slices*sizeof(int));
error = hipMalloc((void **) &dev_cond_hp_index,num_cols*num_rows*num_slices*sizeof(int));
error = hipMalloc((void **) &dev_cond_tc_index,num_cols*num_rows*num_slices*sizeof(int));
error = hipMalloc((void **) &dev_use_cond,num_cols*num_rows*num_slices*sizeof(int));
if(error != hipSuccess) {
cerr << "Unable to allocate device memory for conduction variables" << endl;
exit(1);
}
//Reads in the starting temperatures of the simulation from the input file
for (int k = 0; k < num_slices; k++) {
for(int i = 0; i < num_rows; i++) {
for(int j = 0; j < num_cols; j++) {
source_file >> temp[i*num_cols + j];
}
}
//Copies the current temperature slice to the device
error = hipMemcpy(&dev_temp[k*num_cols*num_rows],temp,num_rows*num_cols*sizeof(REAL),hipMemcpyHostToDevice);
if(error != hipSuccess) {
cerr << "Unable to copy Temps to device memory" << endl;
exit(1);
}
}
cout << "Read " << num_rows << " X " << num_cols << " X " << num_slices << " temps" << endl;
//Reads in the conduction codes for each cell of the simulation and parses
//the array indices from the codes
//Unlike the Fortran version of the program, the conduction direction codes
//are ignored since the simulation accounts for them internally
for (int k = 0; k < num_slices; k++) {
for(int i = 0; i < num_rows; i++) {
for(int j = 0; j < num_cols; j++) {
source_file >> temp_str;
cond_codes[i*num_cols + j] = atoi(temp_str.c_str());
cond_tc_index[i*num_cols + j] = atoi(temp_str.substr(0*INDEX_WIDTH,INDEX_WIDTH).c_str())-1;
cond_hp_index[i*num_cols + j] = atoi(temp_str.substr(1*INDEX_WIDTH,INDEX_WIDTH).c_str())-1;
use_cond[i*num_cols + j] = atoi(temp_str.substr(2*INDEX_WIDTH,1).c_str());
}
}
//Copies the current conduction code slice to device memory
error = hipMemcpy(&dev_cond_codes[k*num_rows*num_cols],cond_codes,num_rows*num_cols*sizeof(int),hipMemcpyHostToDevice);
error = hipMemcpy(&dev_cond_tc_index[k*num_rows*num_cols],cond_tc_index,num_rows*num_cols*sizeof(int),hipMemcpyHostToDevice);
error = hipMemcpy(&dev_cond_hp_index[k*num_rows*num_cols],cond_hp_index,num_rows*num_cols*sizeof(int),hipMemcpyHostToDevice);
error = hipMemcpy(&dev_use_cond[k*num_rows*num_cols],use_cond,num_rows*num_cols*sizeof(int),hipMemcpyHostToDevice);
if(error != hipSuccess) {
cerr << "Unable to copy conduction codes to device memory" << endl;
exit(1);
}
}
cout << "Read " << num_rows << " X " << num_cols << " X " << num_slices << " conduction codes" << endl;
//If convection is used for the user specified input file, memory is allocated for its
//variables and they are read in from the input file
if(using_convection) {
//Allocates memory for the convection variables based on the previously read in simulation
//parameters
conv_codes = new int[num_rows*num_cols];
conv_min_temp_index = new int[num_rows*num_cols];
conv_direction = new int[num_rows*num_cols];
conv_vel_index = new int[num_rows*num_cols];
conv_fluid_index = new int[num_rows*num_cols];
conv_rock_index = new int[num_rows*num_cols];
//Allocates convection specific variables in device memory
error = hipMalloc((void **) &dev_conv_codes,num_cols*num_rows*num_slices*sizeof(int));
error = hipMalloc((void **) &dev_conv_min_temp_index,num_cols*num_rows*num_slices*sizeof(int));
error = hipMalloc((void **) &dev_conv_direction,num_cols*num_rows*num_slices*sizeof(int));
error = hipMalloc((void **) &dev_conv_vel_index,num_cols*num_rows*num_slices*sizeof(int));
error = hipMalloc((void **) &dev_conv_fluid_index,num_cols*num_rows*num_slices*sizeof(int));
error = hipMalloc((void **) &dev_conv_rock_index,num_cols*num_rows*num_slices*sizeof(int));
if(error != hipSuccess) {
cerr << "Unable to allocate device memory for convection" << endl;
exit(1);
}
//Reads in the convection codes for each cell of the simulation and parses the array
//indices from the codes
for (int k = 0; k < num_slices; k++) {
for(int i = 0; i < num_rows; i++) {
for(int j = 0; j < num_cols; j++) {
source_file >> temp_str;
conv_codes[i*num_cols + j] = atoi(temp_str.c_str());
conv_min_temp_index[i*num_cols + j] = atoi(temp_str.substr(0*INDEX_WIDTH,INDEX_WIDTH).c_str())-1;
conv_vel_index[i*num_cols + j] = atoi(temp_str.substr(1*INDEX_WIDTH,INDEX_WIDTH).c_str())-1;
conv_fluid_index[i*num_cols + j] = atoi(temp_str.substr(2*INDEX_WIDTH,INDEX_WIDTH).c_str())-1;
conv_rock_index[i*num_cols + j] = atoi(temp_str.substr(3*INDEX_WIDTH,INDEX_WIDTH).c_str())-1;
conv_direction[i*num_cols + j] = atoi(temp_str.substr(4*INDEX_WIDTH,2).c_str());
}
}
//Copies the current convection code slice to device memory
error = hipMemcpy(&dev_conv_codes[k*num_cols*num_rows],conv_codes,num_rows*num_cols*sizeof(int),hipMemcpyHostToDevice);
error = hipMemcpy(&dev_conv_min_temp_index[k*num_cols*num_rows],conv_min_temp_index,num_rows*num_cols*sizeof(int),hipMemcpyHostToDevice);
error = hipMemcpy(&dev_conv_direction[k*num_cols*num_rows],conv_direction,num_rows*num_cols*sizeof(int),hipMemcpyHostToDevice);
error = hipMemcpy(&dev_conv_vel_index[k*num_cols*num_rows],conv_vel_index,num_rows*num_cols*sizeof(int),hipMemcpyHostToDevice);
error = hipMemcpy(&dev_conv_fluid_index[k*num_cols*num_rows],conv_fluid_index,num_rows*num_cols*sizeof(int),hipMemcpyHostToDevice);
error = hipMemcpy(&dev_conv_rock_index[k*num_cols*num_rows],conv_rock_index,num_rows*num_cols*sizeof(int),hipMemcpyHostToDevice);
if(error != hipSuccess) {
cerr << "Unable to copy convection codes to device memory" << endl;
exit(1);
}
}
cout << "Read " << num_rows << " X " << num_cols << " X " << num_slices << " convection codes" << endl;
}
//Reads in the X (column) dimensions and finds the minimum column dimension
for(int i = 0; i < num_cols; i++) {
source_file >> dim_x[i];
if(i == 0) {
min_col_dim = dim_x[0];
dist_x[0] = dim_x[0]/2.0;
}
else {
if(dim_x[i] < min_col_dim) {
min_col_dim = dim_x[i];
}
dist_x[i] = dist_x[i-1] + dim_x[i-1]/2.0 + dim_x[i]/2.0;
}
}
max_dist_x = dist_x[num_cols-1] + dim_x[num_cols-1]/2.0;
//Copies the x dimensions and distances to device memory
error = hipMemcpy(dev_dim_x,dim_x,num_cols*sizeof(REAL),hipMemcpyHostToDevice);
if(error != hipSuccess) {
cerr << "Unable to copy x dimensions to device" << endl;
exit(1);
}
error = hipMemcpy(dev_dist_x,dist_x,num_cols*sizeof(REAL),hipMemcpyHostToDevice);
if(error != hipSuccess) {
cerr << "Unable to copy x dimensions to device" << endl;
exit(1);
}
//Reads in the Y (row) dimensions and finds the minimum row dimension
for(int i = 0; i < num_rows; i++) {
source_file >> dim_y[i];
if(i == 0) {
min_row_dim = dim_y[i];
dist_y[0] = dim_y[0]/2.0;
}
else {
if(dim_y[i] < min_row_dim) {
min_row_dim = dim_y[i];
}
dist_y[i] = dist_y[i-1] + dim_y[i-1]/2.0 + dim_y[i]/2.0;
}
}
max_dist_y = dist_y[num_rows-1] + dim_y[num_rows-1]/2.0;
//Copies the y dimensions and distances to device memory
error = hipMemcpy(dev_dim_y,dim_y,num_rows*sizeof(REAL),hipMemcpyHostToDevice);
if(error != hipSuccess) {
cerr << "Unable to copy y dimensions to device" << endl;
exit(1);
}
error = hipMemcpy(dev_dist_y,dist_y,num_rows*sizeof(REAL),hipMemcpyHostToDevice);
if(error != hipSuccess) {
cerr << "Unable to copy y dimensions to device" << endl;
exit(1);
}
//Reads in the Z (slice depth) dimensions and finds the minimum slice dimension
for (int i = 0; i < num_slices; i++) {
source_file >> dim_z[i];
if (i == 0) {
min_slice_dim = dim_z[i];
dist_z[0] = dim_z[0]/2.0;
}
else {
if (dim_z[i] < min_slice_dim) {
min_slice_dim = dim_z[i];
}
dist_z[i] = dist_z[i-1] + dim_z[i-1]/2.0 + dim_z[i]/2.0;
}
}
max_dist_z = dist_z[num_slices-1] + dim_z[num_slices-1]/2.0;
//Copies the z dimensions and distances to device memory
error = hipMemcpy(dev_dim_z,dim_z,num_slices*sizeof(REAL),hipMemcpyHostToDevice);
if(error != hipSuccess) {
cerr << "Unable to copy y dimensions to device" << endl;
exit(1);
}
error = hipMemcpy(dev_dist_z,dist_z,num_slices*sizeof(REAL),hipMemcpyHostToDevice);
if(error != hipSuccess) {
cerr << "Unable to copy y dimensions to device" << endl;
exit(1);
}
//Reads in the conduction heat production values
source_file >> num_hp;
heat_production_values = new REAL[num_hp];
for(int i = 0; i < num_hp; i++) {
source_file >> heat_production_values[i];
heat_production_values[i] /= 1E6;
}
//Allocates and copies the heat production values to device memory
error = hipMalloc((void **) &dev_heat_production_values,num_hp*sizeof(REAL));
error = hipMemcpy(dev_heat_production_values,heat_production_values,num_hp*sizeof(REAL),hipMemcpyHostToDevice);
if(error != hipSuccess) {
cerr << "Unable to copy heat production values to device" << endl;
exit(1);
}
cout << "Read "<< num_hp << " heat production values" << endl;
//Reads in the thermal conduction difference values
//Finds the minimum and maximum thermal conductivity differences and
//performs some scaling of the conduction associated variables
source_file >> num_tcd;
thermal_conduct_diff = new REAL[num_tcd];
cout << "Converted " << num_tcd << " Thermal Conductivities to Diff. in m^2/y" << endl;
for(int i = 0; i < num_tcd; i++) {
source_file >> thermal_conduct_diff[i];
thermal_conduct_diff[i] *= 14.33;
if(i == 0) {
max_thermal_conduct_diff = thermal_conduct_diff[0];
min_thermal_conduct_diff = thermal_conduct_diff[0];
}
else {
if(thermal_conduct_diff[i] > max_thermal_conduct_diff) {
max_thermal_conduct_diff = thermal_conduct_diff[i];
}
if(thermal_conduct_diff[i] < min_thermal_conduct_diff) {
min_thermal_conduct_diff = thermal_conduct_diff[i];
}
}
cout << " " << thermal_conduct_diff[i];
}
//Allocates and copies the thermal conductivity difference values to device memory
error = hipMalloc((void **) &dev_thermal_conduct_diff,num_tcd*sizeof(REAL));
error = hipMemcpy(dev_thermal_conduct_diff,thermal_conduct_diff,num_tcd*sizeof(REAL),hipMemcpyHostToDevice);
if(error != hipSuccess) {
cerr << "Unable to copy thermal conductivities to device" << endl;
exit(1);
}
//Reads in the convection specific variables if convection
//is used by the user specified input file
if(using_convection) {
//Reads in the fluid heat capacity values
source_file >> num_hcf;
heat_capac_fluid = new REAL[num_hcf];
for(int i = 0; i < num_hcf; i++) {
source_file >> heat_capac_fluid[i];
}
//Allocates and copies the fluid heat capacity values to device memory
error = hipMalloc((void **) &dev_heat_capac_fluid,num_hcf*sizeof(REAL));
error = hipMemcpy(dev_heat_capac_fluid,heat_capac_fluid,num_hcf*sizeof(REAL),hipMemcpyHostToDevice);
if(error != hipSuccess) {
cerr << "Unable to copy fluid heat capacity to device" << endl;
exit(1);
}
//Reads in the rock heat capacity values
source_file >> num_hcr;
heat_capac_rock = new REAL[num_hcr];
for(int i = 0; i < num_hcr; i++) {
source_file >> heat_capac_rock[i];
}
//Allocates and copies the rock heat capacity values to device memory
error = hipMalloc((void **) &dev_heat_capac_rock,num_hcr*sizeof(REAL));
error = hipMemcpy(dev_heat_capac_rock,heat_capac_rock,num_hcr*sizeof(REAL),hipMemcpyHostToDevice);
if(error != hipSuccess) {
cerr << "Unable to copy rock heat capacity to device" << endl;
exit(1);
}
//Reads in the minimum convection temperatures
source_file >> num_mtc;
min_temp_conv = new REAL[num_mtc];
for(int i = 0; i < num_mtc; i++) {
source_file >> min_temp_conv[i];
}
//Allocates and copies the minimum convection temperature values to device memory
error = hipMalloc((void **) &dev_min_temp_conv,num_mtc*sizeof(REAL));
error = hipMemcpy(dev_min_temp_conv,min_temp_conv,num_mtc*sizeof(REAL),hipMemcpyHostToDevice);
if(error != hipSuccess) {
cerr << "Unable to copy minimum temp for convection to device" << endl;
exit(1);
}
//Reads in the convection velocities
source_file >> num_vel;
vel = new REAL[num_vel];
for(int i = 0; i < num_vel; i++) {
source_file >> vel[i];
}
cout << endl << "Read " << num_vel << " Velocities in m/yr" << endl;
//Allocates and copies the convection velocities to device memory
error = hipMalloc((void **) &dev_vel,num_vel*sizeof(REAL));
error = hipMemcpy(dev_vel,vel,num_vel*sizeof(REAL),hipMemcpyHostToDevice);
if(error != hipSuccess) {
cerr << "Unable to copy convection velocities to device" << endl;
exit(1);
}
//Finds the maximum convection velocity
max_vel = vel[0];
cout << " " << vel[0];
for(int i = 1 ; i < num_vel; i++) {
if(vel[i] > max_vel) {
max_vel = vel[i];
}
cout << " " << vel[i];
}
cout << endl;
}
//Closes the input file
source_file.close();
/*
T1 = max_thermal_conduct_diff;
T2 = min_col_dim;
T3 = min_row_dim;
*/
//Finds the convection time increment
if(using_convection) {
if(min_col_dim > min_row_dim) {
tic = min_row_dim/max_vel;
}
else {
tic = min_col_dim/max_vel;
}
}
//Calculates the maximum time step of the simulation
if(min_col_dim < min_row_dim) {
time_step = min_col_dim*min_col_dim/(5*max_thermal_conduct_diff);
}
else {
time_step = min_row_dim*min_row_dim/(5*max_thermal_conduct_diff);
}
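//Illustrative check of the stability limit above (numbers not from any input): with a
//minimum cell dimension of 10 m and max_thermal_conduct_diff = 30 m^2/yr,
//time_step = 10*10/(5*30) ~= 0.67 yr, a conservative fraction of the classic 2-D
//explicit diffusion limit dt <= d^2/(4*alpha).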
cout << endl << "Done Loading Input File" << endl;
}
/**
* Saves the current state of the simulation, using the same format
* as the input file
*/
void save_model_state() {
ofstream output_file; //Output file stream
//Opens the output file for writing
output_file.open(output_filename.c_str(),ios::out);
if(!output_file.is_open()) {
cerr << "Failed to write state to file" << endl;
exit(1);
}
else {
//Prints the simulation parameters to the output file
output_file << setw(20) << num_rows << " " << setw(20) << num_cols << " " << setw(20) << num_slices << setw(20) << using_convection << endl;
output_file << setw(20) << fixed << setprecision(OUT_PRECISION) << chf*1000.0 << " " << setw(20) << initial_time + sim_time << endl;
output_file << title << endl;
output_file << setprecision(OUT_PRECISION);
//Prints the current temperature array of the simulation
for (int k = 0; k < num_slices; k++) {
//Copies the current temperature slice into host memory
error = hipMemcpy(temp,&dev_temp[k*num_rows*num_cols],num_rows*num_cols*sizeof(REAL),hipMemcpyDeviceToHost);
if(error != hipSuccess) {
cerr << "Unable to copy convection velocities to device" << endl;
exit(1);
}
for(int i = 0; i < num_rows; i++) {
for(int j = 0; j < num_cols; j++) {
output_file << " " << setw(OUT_PRECISION+5) << temp[i*num_cols + j];
}
output_file << endl;
}
output_file << endl;
}
//Prints the conduction codes of the simulation to the output file
output_file << setfill('0');
for (int k = 0; k < num_slices; k++) {
//Copies the current conduction code slice into host memory
error = hipMemcpy(cond_codes,&dev_cond_codes[k*num_rows*num_cols],num_rows*num_cols*sizeof(int),hipMemcpyDeviceToHost);
if(error != hipSuccess) {
cerr << "Unable to copy convection velocities to device" << endl;
exit(1);
}
for(int i = 0; i < num_rows; i++) {
for(int j = 0; j < num_cols; j++) {
output_file << " " << setw(2*INDEX_WIDTH+1) << cond_codes[i*num_cols + j];
}
output_file << endl;
}
output_file << endl;
}
//Prints the convection codes to the output file if convection is being used
if(using_convection) {
for (int k = 0; k < num_slices; k++) {
//Copies the current convection code slice into host memory
error = hipMemcpy(conv_codes,&dev_conv_codes[k*num_rows*num_cols],num_rows*num_cols*sizeof(int),hipMemcpyDeviceToHost);
if(error != hipSuccess) {
cerr << "Unable to copy convection velocities to device" << endl;
exit(1);
}
for(int i = 0; i < num_rows; i++) {
for(int j = 0; j < num_cols; j++) {
output_file << " " << setw(4*INDEX_WIDTH+2) << conv_codes[i*num_cols + j];
}
output_file << endl;
}
output_file << endl;
}
}
output_file << setfill(' ');
output_file << setprecision(3);
//Prints the column (X) dimensions of the simulation to the output file
for(int i = 0; i < num_cols; i++) {
output_file << " " << dim_x[i];
}
output_file << endl;
//Prints the row (Y) dimensions of the simulation to the output file
for(int i = 0; i < num_rows; i++) {
output_file << " " << dim_y[i];
}
output_file << endl;
// Prints the slice (Z) dimensions of the simulation to the output file
for (int i = 0; i < num_slices; i++) {
output_file << " " << dim_z[i];
}
output_file << endl;
//Prints the heat production values of the simulation to the output file
output_file << " " << num_hp;
for(int i = 0; i < num_hp; i++) {
output_file << " " << scientific << heat_production_values[i]*1E6;
}
output_file << endl;
//Prints the thermal conductivity difference values to the output file
output_file << " " << num_tcd;
for(int i = 0; i < num_tcd; i++) {
output_file << " " << thermal_conduct_diff[i]/14.33;
}
output_file << endl;
//Prints the convection specific variables to the output file if convection is used
if(using_convection) {
//Prints the fluid heat capacity values to the output file
output_file << " " << num_hcf;
for(int i = 0; i < num_hcf; i++) {
output_file << " " << heat_capac_fluid[i];
}
output_file << endl;
//Prints the rock heat capacity values to the output file
output_file << " " << num_hcr;
for(int i = 0; i < num_hcr; i++) {
output_file << " " << heat_capac_rock[i];
}
output_file << endl;
//Prints the minimum convection temps to the output file
output_file << " " << num_mtc;
for(int i = 0; i < num_mtc; i++) {
output_file << " " << min_temp_conv[i];
}
output_file << endl;
//Prints the convection velocities to the output file
output_file << " " << num_vel;
for(int i = 0; i < num_vel; i++) {
output_file << " " << vel[i];
}
output_file << endl;
}
//Closes the output file
output_file.close();
}
}
/**
* Saves the current temperatures of the simulation to a DSAA surfer grid file
*/
void save_surfer() {
ofstream output_file; //Output file stream
ostringstream oss;
string filename, extension;
filename = output_su_filename.substr(0,output_su_filename.length()-4);
extension = output_su_filename.substr(output_su_filename.length()-4,4);
for(int k = 0; k < num_slices; k++) {
oss.str("");
oss.clear();
oss << filename << setfill('0') << setw(su_num_width) << k << extension;
//Opens the output file for writing
output_file.open(oss.str().c_str(),ios::out);
if(!output_file.is_open()) {
cerr << "Failed to write surfer file" << endl;
exit(1);
}
else {
REAL min_temp, max_temp, temp_range; //Minimum and maximum temperatures.
REAL xmax,ymin; //Maximum x and minimum y distances
//Copies the current temperature slice to host memory
error = hipMemcpy(temp,&dev_temp[k*num_rows*num_cols],num_rows*num_cols*sizeof(REAL),hipMemcpyDeviceToHost);
if(error != hipSuccess) {
cerr << "Unable to copy convection velocities to device" << endl;
exit(1);
}
//Finds the minimum and maximum temps in the temperature array
min_temp = max_temp = temp[0];
for(int i = 0; i < num_rows; i++) {
for(int j = 0; j < num_cols; j++) {
if(temp[i*num_cols + j] > max_temp) {
max_temp = temp[i*num_cols + j];
}
if(temp[i*num_cols + j] < min_temp) {
min_temp = temp[i*num_cols + j];
}
}
}
//Calculates the temperature range.
temp_range = max_temp - min_temp;
if(temp_range == 0) {
temp_range = 1.0;
}
//Calculates the maximum x distance and the
//minimum y distance
xmax = dim_x[0]*num_cols;
ymin = dim_y[0]*num_rows;
if(dim_x[0] < 0.01) {
xmax *= 1000;
}
else if(dim_x[0] < 0.1) {
xmax *= 100;
}
else if(dim_x[0] < 1) {
xmax *= 10;
}
if(dim_y[0] < 0.01) {
ymin *= 1000;
}
else if(dim_y[0] < 0.1) {
ymin *= 100;
}
else if(dim_y[0] < 1) {
ymin *= 10;
}
//Prints the DSAA surfer grid parameters to the output file
output_file << "DSAA" << endl;
output_file << setw(20) << num_cols << " " << setw(20) << num_rows << endl;
output_file << fixed << setprecision(3) << setw(20) << 0.0 << " " << setw(20) << xmax << endl;
output_file << setw(20) << -ymin << " " << setw(20) << 0.0 << endl;
output_file << setw(20) << setprecision(OUT_PRECISION) << min_temp << " " << setw(20) << max_temp << endl;
//Prints the temperature array to the output file
for(int i = num_rows-1; i >= 0; i--) {
for(int j = 0; j < num_cols; j++) {
output_file << " " << setw(OUT_PRECISION+5) << temp[i*num_cols + j];
}
output_file << endl;
}
//Closes the output file
output_file.close();
}
}
}
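/*
 * The header block written above follows the Surfer ASCII (DSAA) grid layout: the
 * "DSAA" tag, the column and row counts, the x range, the y range and the data
 * (temperature) range, followed by the grid values written row by row from the bottom
 * of the model upward.
 */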
/**
* Calculates and returns the heat flow per year between two cells in the X direction
* based on the provided indexes
*/
__device__ REAL cond_add_x(int row1, int col1, int slice1, int row2, int col2, int slice2, int num_rows, int num_cols, int num_slices, REAL *dim_x, REAL *dim_y, REAL *dim_z, REAL *temp, REAL *next_temp, REAL *thermal_conduct_diff, int *cond_tc_index) {
REAL temp_diff; //Temperature difference between the two cells
REAL ad; //Series thermal resistance term between the two cells
temp_diff = temp[slice1*num_rows*num_cols + row1*num_cols + col2] - temp[slice1*num_rows*num_cols + row1*num_cols + col1];
ad = dim_x[col2]/thermal_conduct_diff[cond_tc_index[slice1*num_rows*num_cols + row1*num_cols + col2]] + dim_x[col1]/thermal_conduct_diff[cond_tc_index[slice1*num_rows*num_cols + row1*num_cols + col1]];
return 2*temp_diff/(ad*dim_x[col1]);
}
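/*
 * In the notation above, cond_add_x computes a series (harmonic-mean style) conductance
 * between the two cells:
 *
 *   q_x = 2*(T2 - T1) / ( (dx2/k2 + dx1/k1) * dx1 )
 *
 * where dx is the cell width and k the diffusivity selected through cond_tc_index;
 * cond_add_y and cond_add_z below are the same expression along the other two axes.
 */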
/**
* Calculates and returns the heat flow per year between two cells in the Y direction
* based on the provided indexes
*/
__device__ REAL cond_add_y(int row1, int col1, int slice1, int row2, int col2, int slice2, int num_rows, int num_cols, int num_slices, REAL *dim_x, REAL *dim_y, REAL *dim_z, REAL *temp, REAL *next_temp, REAL *thermal_conduct_diff, int *cond_tc_index) {
REAL temp_diff; //Temperature difference between the two cells
REAL ad; //Series thermal resistance term between the two cells
temp_diff = temp[slice1*num_rows*num_cols + row2*num_cols + col1] - temp[slice1*num_rows*num_cols + row1*num_cols + col1];
ad = dim_y[row2]/thermal_conduct_diff[cond_tc_index[slice1*num_rows*num_cols + row2*num_cols + col1]] + dim_y[row1]/thermal_conduct_diff[cond_tc_index[slice1*num_rows*num_cols + row1*num_cols + col1]];
return 2*temp_diff/(ad*dim_y[row1]);
}
/**
* Calculates and returns the heat flow per year between two cells in the Z direction
* based on the provided indexes
*/
__device__ REAL cond_add_z(int row1, int col1, int slice1, int row2, int col2, int slice2, int num_rows, int num_cols, int num_slices, REAL *dim_x, REAL *dim_y, REAL *dim_z, REAL *temp, REAL *next_temp, REAL *thermal_conduct_diff, int *cond_tc_index) {
if(num_slices == 1) {
return 0.0;
}
REAL temp_diff; //Temperature difference between the two cells
REAL ad; //Series thermal resistance term between the two cells
temp_diff = temp[slice2*num_rows*num_cols + row1*num_cols + col1] - temp[slice1*num_rows*num_cols + row1*num_cols + col1];
ad = dim_z[slice2]/thermal_conduct_diff[cond_tc_index[slice2*num_rows*num_cols + row1*num_cols + col1]] + dim_z[slice1]/thermal_conduct_diff[cond_tc_index[slice1*num_rows*num_cols + row1*num_cols + col1]];
return 2*temp_diff/(ad*dim_z[slice1]);
}
/**
 * Calculates the in-plane (X and Y) heat flow due to conduction for cell (i,j) of
 * slice k, handling the edge and corner cases of the slice. As a side effect it
 * initializes next_temp for the cell: 0.0 in the interior and along the top and side
 * boundaries, and the constant basal heat flow DHF/dim_y[i] along the bottom row.
 */
__device__ REAL in_plane_cond(int i, int j, int k, int num_rows, int num_cols, int num_slices, REAL *dim_x, REAL *dim_y, REAL *dim_z, REAL DHF, REAL *temp, REAL *next_temp, REAL *thermal_conduct_diff, int *cond_tc_index) {
REAL heat_flow_x;
REAL heat_flow_y;
/* k is fixed */
if(i == 0 && j == 0) { //Top left corner of slice
heat_flow_x = cond_add_x(i,j,k,i,j+1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
heat_flow_y = cond_add_y(i,j,k,i+1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
next_temp[k*num_rows*num_cols + i*num_cols + j] = 0.0;
}
else if(i == 0 && j == num_cols-1) { //Top right corner of slice
heat_flow_x = cond_add_x(i,j,k,i,j-1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
heat_flow_y = cond_add_y(i,j,k,i+1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
next_temp[k*num_rows*num_cols + i*num_cols + j] = 0.0;
}
else if(i == 0) { //Top of slice
heat_flow_x = cond_add_x(i,j,k,i,j+1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index) + cond_add_x(i,j,k,i,j-1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
heat_flow_y = cond_add_y(i,j,k,i+1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
next_temp[k*num_rows*num_cols + i*num_cols + j] = 0.0;
}
else if(i == num_rows-1 && j == 0) { //Bottom left corner of slice
heat_flow_x = cond_add_x(i,j,k,i,j+1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
heat_flow_y = cond_add_y(i,j,k,i-1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
next_temp[k*num_rows*num_cols + i*num_cols + j] = DHF/dim_y[i]; //Constant heat flow at the bottom of the model
}
else if(i == num_rows-1 && j == num_cols-1) { //Bottom right corner of slice
heat_flow_x = cond_add_x(i,j,k,i,j-1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
heat_flow_y = cond_add_y(i,j,k,i-1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
next_temp[k*num_rows*num_cols + i*num_cols + j] = DHF/dim_y[i]; //Constant heat flow at the bottom of the model
}
else if(i == num_rows-1) { //Bottom
heat_flow_x = cond_add_x(i,j,k,i,j+1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index) + cond_add_x(i,j,k,i,j-1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
heat_flow_y = cond_add_y(i,j,k,i-1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
next_temp[k*num_rows*num_cols + i*num_cols + j] = DHF/dim_y[i]; //Constant heat flow at the bottom of the model
}
else if(j == 0) { //Left side of slice
heat_flow_x = cond_add_x(i,j,k,i,j+1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
heat_flow_y = cond_add_y(i,j,k,i-1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index) + cond_add_y(i,j,k,i+1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
next_temp[k*num_rows*num_cols + i*num_cols + j] = 0.0;
}
else if(j == num_cols-1) { //Right side of slice
heat_flow_x = cond_add_x(i,j,k,i,j-1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
heat_flow_y = cond_add_y(i,j,k,i-1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index) + cond_add_y(i,j,k,i+1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
next_temp[k*num_rows*num_cols + i*num_cols + j] = 0.0;
}
else { //Middle of slice
heat_flow_x = cond_add_x(i,j,k,i,j-1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index) + cond_add_x(i,j,k,i,j+1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
heat_flow_y = cond_add_y(i,j,k,i-1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index) + cond_add_y(i,j,k,i+1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
next_temp[k*num_rows*num_cols + i*num_cols + j] = 0.0;
}
return (heat_flow_x + heat_flow_y);
}
/**
* Conduction Kernel
* Updates the temperature array using 3D conduction with finite
* difference heat flow.
*/
__global__ void conduction_kernel(int num_cells, int num_rows, int num_cols, int num_slices, REAL *dim_x, REAL *dim_y, REAL *dim_z, REAL DHF, REAL time_step, REAL *temp, REAL *next_temp, int *use_cond, REAL *heat_production_values, int *cond_hp_index, REAL *thermal_conduct_diff, int *cond_tc_index){
unsigned long long id = blockIdx.x*blockDim.x+threadIdx.x; //Thread ID
if(id < num_cells) {
int k = id/(num_rows*num_cols);
int i = (id- k*num_rows*num_cols)/num_cols;
int j = id - k*num_rows*num_cols - i*num_cols;
if(use_cond[k*num_rows*num_cols + i*num_cols + j] == 1) {
REAL heatflow_in_plane; //Heat flow occurring inside the plane
REAL heatflow_cross_plane; //Heat flow into and out of plane/slice
if (k == 0) { // First slice
heatflow_in_plane = in_plane_cond(i,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,DHF,temp,next_temp,thermal_conduct_diff,cond_tc_index); // heat transfer inside of plane
heatflow_cross_plane = cond_add_z(i,j,k,i,j,k+1,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index); // slice-to-slice heat transfer; first slice, so only the next slice transfers heat.
}
else if (k == num_slices - 1) { // Last slice
heatflow_in_plane = in_plane_cond(i,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,DHF,temp,next_temp,thermal_conduct_diff,cond_tc_index); // heat transfer inside of plane
heatflow_cross_plane = cond_add_z(i,j,k,i,j,k-1,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index); // slice-to-slice heat transfer. last slice, so only previous slice transfers heat.
}
else { // Middle
heatflow_in_plane = in_plane_cond(i,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,DHF,temp,next_temp,thermal_conduct_diff,cond_tc_index); // heat transfer inside of plane
heatflow_cross_plane = cond_add_z(i,j,k,i,j,k+1,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index) + cond_add_z(i,j,k,i,j,k-1,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index); // slice-to-slice heat transfer. Middle, so both next and previous.
}
//Heat flow from the adjacent cells
next_temp[k*num_rows*num_cols + i*num_cols + j] += temp[k*num_rows*num_cols + i*num_cols + j] + time_step*(heatflow_in_plane + heatflow_cross_plane);
//Heat flow due to radioactive heat production
next_temp[k*num_rows*num_cols + i*num_cols + j] += heat_production_values[cond_hp_index[k*num_rows*num_cols + i*num_cols + j]]*time_step/DTC;
}
else {
next_temp[k*num_rows*num_cols + i*num_cols + j] = temp[k*num_rows*num_cols + i*num_cols + j];
}
}
}
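/*
 * Illustrative check of the id -> (k,i,j) decomposition used by the kernels:
 * for num_rows = 4, num_cols = 5 and id = 27, k = 27/20 = 1, i = (27 - 20)/5 = 1 and
 * j = 27 - 20 - 5 = 2, which round-trips through the k*num_rows*num_cols + i*num_cols + j
 * flattening used throughout the device code.
 */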
/**
* Wrapper Function for the conduction kernel
*/
void conduction_cuda() {
//Calls the conduction kernel
hipLaunchKernelGGL(( conduction_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, num_cells,num_rows,num_cols,num_slices,dev_dim_x,dev_dim_y,dev_dim_z,DHF,time_step,dev_temp,dev_next_temp,dev_use_cond,dev_heat_production_values,dev_cond_hp_index,dev_thermal_conduct_diff,dev_cond_tc_index);
//Waits for the kernel to finish executing
hipDeviceSynchronize();
//Checks if an error occurred during execution of the kernel
error = hipGetLastError();
if(error != hipSuccess) {
cerr << "Error while executing conduction kernel" << endl;
cerr << error << " : " << hipGetErrorString(error) << endl;
cerr << "Exiting the Program" << endl;
exit(1);
}
//Swaps the device temperature arrays
swap_temp_array_cuda();
}
/**
* Performs convection between two specified cells
*/
__device__ void perform_convection(int row1, int col1, int slice1, int row2, int col2, int slice2, int num_rows, int num_cols, int num_slices, REAL *dist_x, REAL *dist_y, REAL *dist_z, REAL time_inc, REAL *temp, REAL *next_temp, REAL *min_temp_conv, int *conv_min_temp_index, REAL *heat_capac_fluid, int *conv_fluid_index, REAL *heat_capac_rock, int *conv_rock_index, REAL *vel, int *conv_vel_index) {
REAL avg_x_dim; //distance between two temperature cells in the x direction
REAL avg_y_dim; //distance between two temperature cells in the y direction
REAL avg_z_dim; //distance between two temperature cells in the z direction
REAL amt; //Effective distance the heat front advances during time_inc (velocity scaled by the fluid/rock heat capacity ratio)
REAL dist; //Distance between the two cells
REAL ratio; //Ratio of amt to distance
//Checks if the specified cell is within the bounds of the simulation and if it has a high enough
//temperature to perform convection
if((row2 >= 0) && (row2 < num_rows) && (col2 >= 0) && (col2 < num_cols) && (slice2 >= 0) && (slice2 < num_slices) && (temp[slice2*num_rows*num_cols + row2*num_cols + col2] - min_temp_conv[conv_min_temp_index[slice1*num_rows*num_cols + row1*num_cols + col1]] >= 0)) {
avg_x_dim = dist_x[col1] - dist_x[col2];
avg_y_dim = dist_y[row1] - dist_y[row2];
avg_z_dim = dist_z[slice1] - dist_z[slice2];
amt = (vel[conv_vel_index[slice1*num_rows*num_cols + row1*num_cols + col1]]*heat_capac_fluid[conv_fluid_index[slice1*num_rows*num_cols + row1*num_cols + col1]]/heat_capac_rock[conv_rock_index[slice1*num_rows*num_cols + row1*num_cols + col1]])*time_inc;
dist = sqrt(avg_x_dim*avg_x_dim + avg_y_dim*avg_y_dim + avg_z_dim*avg_z_dim);
ratio = amt/dist;
if(ratio > 1) {
ratio = 0.999999;
}
next_temp[slice1*num_rows*num_cols + row1*num_cols + col1] = temp[slice1*num_rows*num_cols + row1*num_cols + col1] + ratio *(temp[slice2*num_rows*num_cols + row2*num_cols + col2]-temp[slice1*num_rows*num_cols + row1*num_cols + col1]);
}
else {
next_temp[slice1*num_rows*num_cols + row1*num_cols + col1] = temp[slice1*num_rows*num_cols + row1*num_cols + col1];
}
}
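/*
 * Illustrative numbers for the advection ratio above (not taken from any input file):
 * with vel = 2 m/yr, a fluid-to-rock heat capacity ratio of 2 and time_inc = 5 yr,
 * amt = 2*2*5 = 20 m; for a cell-centre spacing of 25 m, ratio = 20/25 = 0.8, so the
 * cell moves 80% of the way toward its source cell's temperature. Ratios above 1 are
 * clamped to 0.999999 so a cell can never overshoot the temperature it is drawing from.
 */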
/**
* Convection Kernel
* Updates the temperature array using convection
*/
__global__ void convection_kernel(unsigned long long num_cells, int num_rows, int num_cols, int num_slices, REAL *dist_x, REAL *dist_y, REAL *dist_z, REAL time_inc, REAL *temp, REAL *next_temp, int *conv_codes, int *conv_direction, REAL *min_temp_conv, int *conv_min_temp_index, REAL *heat_capac_fluid, int *conv_fluid_index, REAL *heat_capac_rock, int *conv_rock_index, REAL *vel, int *conv_vel_index) {
unsigned long long id = blockIdx.x*blockDim.x+threadIdx.x; //Thread ID
if(id < num_cells) {
int k = id/(num_rows*num_cols);
int i = (id- k*num_rows*num_cols)/num_cols;
int j = id - k*num_rows*num_cols - i*num_cols;
//Checks if convection can occur for the specified cell
if((conv_codes[k*num_rows*num_cols + i*num_cols + j] <= 0) || (i == 0) || (conv_direction[k*num_rows*num_cols + i*num_cols + j] == 5) || (conv_direction[k*num_rows*num_cols + i*num_cols + j] < 1) || (conv_direction[k*num_rows*num_cols + i*num_cols + j] > 27)) {
next_temp[k*num_rows*num_cols + i*num_cols + j] = temp[k*num_rows*num_cols + i*num_cols + j];
}
else {
//Performs convection based on the convection direction code
switch(conv_direction[k*num_rows*num_cols + i*num_cols + j]) {
/**
* IN-PLANE convection -- 1 through 9. These codes are for convection taking place in the current, "k-th" plane
* 1 2 3
* 4 5 6
* 7 8 9
*/
case 1:
perform_convection(i,j,k,i-1,j-1,k,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 2:
perform_convection(i,j,k,i-1,j,k,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 3:
perform_convection(i,j,k,i-1,j+1,k,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 4:
perform_convection(i,j,k,i,j-1,k,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 6:
perform_convection(i,j,k,i,j+1,k,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 7:
perform_convection(i,j,k,i+1,j-1,k,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 8:
perform_convection(i,j,k,i+1,j,k,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 9:
perform_convection(i,j,k,i+1,j+1,k,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
/**
* CROSS-PLANE convection (previous "k-1th" plane) -- 10 through 18
* 10 11 12
* 13 14 15
* 16 17 18
*/
case 10:
perform_convection(i,j,k,i-1,j-1,k-1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 11:
perform_convection(i,j,k,i-1,j,k-1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 12:
perform_convection(i,j,k,i-1,j+1,k-1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 13:
perform_convection(i,j,k,i,j-1,k-1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 14:
perform_convection(i,j,k,i,j,k-1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 15:
perform_convection(i,j,k,i,j+1,k-1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 16:
perform_convection(i,j,k,i+1,j-1,k-1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 17:
perform_convection(i,j,k,i+1,j,k-1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 18:
perform_convection(i,j,k,i+1,j+1,k-1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
/**
* CROSS-PLANE convection ("k+1th" plane) -- 19 through 27
* 19 20 21
* 22 23 24
* 25 26 27
*/
case 19:
perform_convection(i,j,k,i-1,j-1,k+1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 20:
perform_convection(i,j,k,i-1,j,k+1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 21:
perform_convection(i,j,k,i-1,j+1,k+1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 22:
perform_convection(i,j,k,i,j-1,k+1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 23:
perform_convection(i,j,k,i,j,k+1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 24:
perform_convection(i,j,k,i,j+1,k+1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 25:
perform_convection(i,j,k,i+1,j-1,k+1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 26:
perform_convection(i,j,k,i+1,j,k+1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 27:
perform_convection(i,j,k,i+1,j+1,k+1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
}
}
}
}
/**
* Wrapper function for the convection kernel
*/
void convection_cuda() {
for(int i = 0; i < num_conv_loops; i++) {
//Calls the convection kernel
hipLaunchKernelGGL(( convection_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, num_cells,num_rows,num_cols,num_slices,dev_dist_x,dev_dist_y,dev_dist_z,time_inc,dev_temp,dev_next_temp,dev_conv_codes,dev_conv_direction,dev_min_temp_conv,dev_conv_min_temp_index,dev_heat_capac_fluid,dev_conv_fluid_index,dev_heat_capac_rock,dev_conv_rock_index,dev_vel,dev_conv_vel_index);
//Waits for the kernel to finish executing
hipDeviceSynchronize();
//Checks if an error occured during execution
error = hipGetLastError();
if(error != hipSuccess) {
cerr << "Error while executing convection kernel" << endl;
cerr << error << " : " << hipGetErrorString(error) << endl;
cerr << "Exiting the Program" << endl;
exit(1);
}
//Swaps the device temperature arrays
swap_temp_array_cuda();
}
}
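//Note: each convection loop re-launches the kernel on the swapped buffers, so loop n
//reads the temperatures produced by loop n-1; the hipDeviceSynchronize() call plus the
//error check ensure the swap only happens after the kernel has finished.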
/**
* Finds and returns the maximum temperature difference between
* the current and next temperature arrays.
*/
REAL find_max_temp_diff() {
REAL max_diff = fabs(next_temp[0] - temp[0]);
REAL diff = 0.0;
for(int k = 0; k < num_slices; k++) {
for(int i = 0; i < num_rows; i++) {
for(int j = 0; j < num_cols; j++) {
diff = fabs(next_temp[k*num_rows*num_cols + i*num_cols + j] - temp[k*num_rows*num_cols + i*num_cols + j]);
if(diff > max_diff) {
max_diff = diff;
}
}
}
}
return max_diff;
}
/**
* Finds the index of a given x, y, and z value in meters and
* stores them in the index array
*/
void find_loc_index(REAL x_loc, REAL y_loc, REAL z_loc, int *index){
if(x_loc < 0) {
index[0] = -1;
}
else if(x_loc > max_dist_x) {
index[0] = num_cols;
}
else {
for(index[0] = 0; index[0] < num_cols; index[0]++) {
if(x_loc <= dist_x[index[0]]+dim_x[index[0]]/2.0) {
break;
}
}
}
if(y_loc < 0) {
index[1] = -1;
}
else if(y_loc > max_dist_y) {
index[1] = num_rows;
}
else {
for(index[1] = 0; index[1] < num_rows; index[1]++) {
if(y_loc <= dist_y[index[1]]+dim_y[index[1]]/2.0) {
break;
}
}
}
if(z_loc < 0) {
index[2] = -1;
}
else if(z_loc > max_dist_z) {
index[2] = num_slices;
}
else {
for(index[2] = 0; index[2] < num_slices; index[2]++) {
if(z_loc <= dist_z[index[2]]+dim_z[index[2]]/2.0) {
break;
}
}
}
}
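//Illustrative example (assumed values, not from an input file): with three 10 m wide
//columns, dist_x = {5,15,25} and dim_x = {10,10,10}, so find_loc_index(12.0, ...) sets
//index[0] = 1 because 12.0 <= 15 + 10/2 is the first cell-boundary test that succeeds.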
/**
* Finds the indexes of two corners of the moving source
* if either falls within the model. The valid parts of the
* moving source are updated with the moving sources temperature
*/
void update_mvsrc(int index) {
if(mvsrc_valid[index] == 1) {
int loc_index[3], loc_offset_index[3];
find_loc_index(mvsrc_x[index],mvsrc_y[index],mvsrc_z[index],loc_index);
find_loc_index(mvsrc_x[index]+mvsrc_offset_x[index],mvsrc_y[index]+mvsrc_offset_y[index],mvsrc_z[index]+mvsrc_offset_z[index],loc_offset_index);
if((loc_index[0] >= 0 && loc_index[0] < num_cols && loc_index[1] >= 0 && loc_index[1] < num_rows && loc_index[2] >= 0 && loc_index[2] < num_slices) || (loc_offset_index[0] >= 0 && loc_offset_index[0] < num_cols && loc_offset_index[1] >= 0 && loc_offset_index[1] < num_rows && loc_offset_index[2] >= 0 && loc_offset_index[2] < num_slices)) {
for(int k = loc_index[2]; k <= loc_offset_index[2]; k++) {
for(int i = loc_index[1]; i <= loc_offset_index[1]; i++) {
for(int j = loc_index[0]; j <= loc_offset_index[0]; j++) {
if(i >= 0 && i < num_rows && j >= 0 && j < num_cols && k >= 0 && k < num_slices) {
temp[k*num_rows*num_cols + i*num_cols + j] = mvsrc_temp[index];
}
}
}
}
}
else {
mvsrc_valid[index] = 0;
}
}
}
/**
* Updates the moving sources velocity and position vectors
* then updates the temperatures in the current temp array
*/
void update_moving_sources() {
for(int i = 0; i < num_mvsrc; i++) {
mvsrc_vel_x[i] += mvsrc_accel_x[i]*time_step;
mvsrc_vel_y[i] += mvsrc_accel_y[i]*time_step;
mvsrc_vel_z[i] += mvsrc_accel_z[i]*time_step;
mvsrc_x[i] += mvsrc_vel_x[i]*time_step;
mvsrc_y[i] += mvsrc_vel_y[i]*time_step;
mvsrc_z[i] += mvsrc_vel_z[i]*time_step;
update_mvsrc(i);
}
}
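//Note: the velocity is updated from the acceleration first and the position is then
//advanced with the already-updated velocity (a semi-implicit Euler step).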
/**
* Performs a finite heat flow simulation using
* conduction and convection.
*/
int main(int argc, char **argv) {
#ifdef DISPLAY
cout << "\t\t Finite Difference Heat Flow Simulation" << endl;
//Asks the user if they wish to visualize results
cout << endl << "Press 1 to run visualization, otherwise 0: ";
while(!(cin >> display_mode) || display_mode < 0 || display_mode > 1) {
clear_cin();
cout << "Incorrect input, to save the state of the model enter 1, else 0: ";
}
#else
cout << "\t\t Finite Difference Heat Flow Simulation" << endl;
#endif
int input_val; //Temporary int value
REAL temp_val; //Temporary REAL value
//Sets the current device
error = hipSetDevice(0);
//Retrieves the properties of the device
error = hipGetDeviceProperties(&deviceProp, 0);
if(error != hipSuccess) {
cerr << endl << "Error while retrieving device properties" << endl;
cerr << error << ":" << hipGetErrorString(error) << endl;
cerr << "Exiting the Program" << endl;
exit(0);
}
//Loads the input file for the simulation
load_file();
//Checks the total and used amount of device global memory after allocation
size_t free_memory; //Free memory on the device
size_t total_memory; //Total memory on the device
error = hipMemGetInfo(&free_memory, &total_memory); //Retrieves the memory information for the device
if(error != hipSuccess) {
cerr << endl << "Error while getting memory information" << endl;
cerr << error << ":" << hipGetErrorString(error) << endl;
cerr << "Exiting the Program" << endl;
exit(0);
}
cout << "Free memory: "<< (unsigned int)free_memory << ", total memory: "<< (unsigned int)total_memory<<" (after initialization)" << endl;
/**
* Allows the user to change multiple rectangular blocks of temperatures
* within the model
*/
/*
cout << endl << endl << "To Change the Temp. on a Block, Enter 1, Else 0: ";
while(!(cin >> input_val) || input_val < 0 || input_val > 1) {
clear_cin();
cout << "Incorrect Input, Enter 1 to Change, Else 0: ";
}
//Warning, the row column pairs need to be space separated, not comma separated
if(input_val == 1) {
int num_block, row1, row2, col1, col2, slice1, slice2;
REAL new_temp;
cout << "Enter the Number of Blocks to Change: ";
while(!(cin >> num_block) || num_block < 0) {
clear_cin();
cout << "Enter a number greater than or equal to 0: ";
}
for(int i = 0; i < num_block; i++) {
cout << endl << "Block " << i << endl;
cout << "Enter the Coordinates of the Upper Left Corner <row> <column> <slice>: ";
while(!(cin >> row1 >> col1 >> slice1) || row1 < 0 || col1 < 0 || slice1 < 0) {
clear_cin();
cout << "Incorrect input, enter three positive numbers with spaces: ";
}
cout << "Enter the Coordinates of the Lower Right Corner <row> <column> <slice>: ";
while(!(cin >> row2 >> col2 >> slice2) || row2 < row1 || col2 < col1 || slice2 < slice1) {
clear_cin();
cout << "Incorrect input, enter three positive numbers with spaces: ";
}
cout << endl << "Current Block Temps" << endl;
cout << setw(10) << "row" << " " << setw(10) << "col" << " " << setw(10) << "slice" << " " << setw(OUT_PRECISION+5) << "temp" << endl;
cout << setw(10) << row1 << " " << setw(10) << col1 << " " << setw(10) << slice1 << setw(OUT_PRECISION+5) << fixed << setprecision(OUT_PRECISION) << temp[slice1*num_rows*num_cols + row1*num_cols + col1] << endl;
cout << setw(10) << row2 << " " << setw(10) << col2 << " " << setw(10) << slice2 << setw(OUT_PRECISION+5) << temp[slice2*num_rows*num_cols + row2*num_cols + col2] << endl;
cout << "Enter a New Temperature For the Block: ";
while(!(cin >> new_temp)) {
clear_cin();
cout << "Incorrect input, enter a new temperature: ";
}
for(int i = row1; i < row2; i++) {
for(int j = col1; j < col2; j++) {
for(int k = slice1; k < slice2; k++) {
if(i >= 0 && i < num_rows && j >= 0 && j < num_cols && k >= 0 && k < num_slices) {
temp[k*num_rows*num_cols + i*num_cols + j] = new_temp;
}
}
}
}
}
}
*/
/**
* Allows the user to start one or more moving sources.
*/
using_moving_source = 0;
/*
cout << endl << endl << "To Start One or More Moving Sources Enter 1, Else Enter 0: ";
while(!(cin >> using_moving_source) || using_moving_source < 0 || using_moving_source > 1) {
clear_cin();
cout << "Incorrect Input, Enter 1 to Change, Else 0: ";
}
if(using_moving_source == 1) {
REAL mag, angle1, angle2;
cout << "Enter the number of moving sources: ";
while(!(cin >> num_mvsrc) || num_mvsrc <= 0) {
clear_cin();
cout << "Incorrect input, enter a number greater than 0: ";
}
mvsrc_x = new REAL[num_mvsrc];
mvsrc_y = new REAL[num_mvsrc];
mvsrc_z = new REAL[num_mvsrc];
mvsrc_offset_x = new REAL[num_mvsrc];
mvsrc_offset_y = new REAL[num_mvsrc];
mvsrc_offset_z = new REAL[num_mvsrc];
mvsrc_vel_x = new REAL[num_mvsrc];
mvsrc_vel_y = new REAL[num_mvsrc];
mvsrc_vel_z = new REAL[num_mvsrc];
mvsrc_accel_x = new REAL[num_mvsrc];
mvsrc_accel_y = new REAL[num_mvsrc];
mvsrc_accel_z = new REAL[num_mvsrc];
mvsrc_temp = new REAL[num_mvsrc];
mvsrc_valid = new int[num_mvsrc];
for(int i = 0; i < num_mvsrc; i++) {
cout << endl << "Moving source " << i << endl;
cout << "Valid coordinates are x=0-"<<max_dist_x<<" y=0-"<<max_dist_y<<" z=0-"<<max_dist_z<<":" << endl;
cout << "Enter the coordinates in meters for the corner closest to the origin, <x> <y> <z>: ";
while(!(cin >> mvsrc_x[i] >> mvsrc_y[i] >> mvsrc_z[i]) || mvsrc_x[i] < 0 || mvsrc_x[i] > max_dist_x || mvsrc_y[i] < 0 || mvsrc_y[i] > max_dist_y || mvsrc_z[i] < 0 || mvsrc_z[i] > max_dist_z) {
clear_cin();
cout << "Incorrect input, enter a valid coordinate between x=0-"<<max_dist_x<<" y=0-"<<max_dist_y<<" z=0-"<<max_dist_z<<":";
}
cout << "Valid sizes are x=0-"<<max_dist_x-mvsrc_x[i]<<" y=0-"<<max_dist_y-mvsrc_y[i]<<" z=0-"<<max_dist_z-mvsrc_z[i]<<":"<<endl;
cout << "Enter the size of the moving source in meters, <x size> <y size> <z size>: ";
while(!(cin >> mvsrc_offset_x[i] >> mvsrc_offset_y[i] >> mvsrc_offset_z[i]) || mvsrc_offset_x[i] <= 0 || mvsrc_offset_x[i] > max_dist_x-mvsrc_x[i] || mvsrc_offset_y[i] <= 0 || mvsrc_offset_y[i] > max_dist_y-mvsrc_y[i] || mvsrc_offset_z[i] <= 0 || mvsrc_offset_z[i] > max_dist_z-mvsrc_z[i]) {
clear_cin();
cout << "Incorrect input, enter a valid distance between x=0-"<<max_dist_x-mvsrc_x[i]<<" y=0-"<<max_dist_y-mvsrc_y[i]<<" z=0-"<<max_dist_z-mvsrc_z[i]<<":";
}
cout << "Enter the angle of the moving sources vector in degrees from positve x towards negative y (0-360): ";
while(!(cin >> angle1) || angle1 < 0 || angle1 > 360) {
clear_cin();
cout << "Incorrect input, enter a valid angle: ";
}
cout << "Enter the angle of the moving sources vector in degrees from positve z (0-180): ";
while(!(cin >> angle2) || angle2 < 0 || angle2 > 180) {
clear_cin();
cout << "Incorrect input, enter a valid angle: ";
}
cout << "Enter the magnitude of the velocity vector in m/year: ";
while(!(cin >> mag) || mag < 0) {
clear_cin();
cout << "Incorrect input, enter a velocity greater than 0: ";
}
mvsrc_vel_x[i] = mag*sin(angle2/180.0*M_PI)*cos(angle1/180.0*M_PI);
mvsrc_vel_y[i] = mag*sin(angle2/180.0*M_PI)*sin(angle1/180.0*M_PI);
mvsrc_vel_z[i] = mag*cos(angle2/180.0*M_PI);
cout << "Enter the magnitude of the acceleration vector in m/year^2: ";
while(!(cin >> mag) || mag < 0) {
clear_cin();
cout << "Incorrect input, enter an acceleration greater than 0: ";
}
mvsrc_accel_x[i] = mag*sin(angle2/180.0*M_PI)*cos(angle1/180.0*M_PI);
mvsrc_accel_y[i] = mag*sin(angle2/180.0*M_PI)*sin(angle1/180.0*M_PI);
mvsrc_accel_z[i] = mag*cos(angle2/180.0*M_PI);
cout << "Enter the temperature of the moving source: ";
while(!(cin >> mag)) {
clear_cin();
cout << "Incorrect input, enter a valid temperature: ";
}
mvsrc_temp[i] = mag;
mvsrc_valid[i] = 1;
update_mvsrc(i);
}
}
*/
//Allows the user to decrease the size of the time step
cout << endl << endl << "Each Iteration in Time Spans " << scientific << time_step << " Years" << endl;
cout << "Enter a Shorter Iteration Time in Years if Desired (any larger number otherwise): ";
while(!(cin >> temp_val) || temp_val <= 0) {
clear_cin();
cout << "Incorrect input, enter a number greater than 0: ";
}
if(temp_val < time_step) {
time_step = temp_val;
}
DHF = chf * QFAC * time_step;
//Calculates the number of convection loops to perform per time step
num_conv_loops = (int)(time_step/(10*tic));
if(num_conv_loops > 5) {
num_conv_loops = 5;
}
else if(num_conv_loops <= 0) {
num_conv_loops = 1;
}
//Calculates the time increment per convection loop
time_inc = time_step/num_conv_loops;
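//Illustrative example (assumed values): with time_step = 100 years and tic = 1 year,
//time_step/(10*tic) = 10, which is clamped to 5 convection loops of time_inc = 20 years each.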
min_row_dim = 100.0;
for(int i = 0; i < num_rows; i++) {
if(dim_y[i] < min_row_dim) {
min_row_dim = dim_y[i];
}
}
//Asks the user for the runtime of the simulation
thermal_time_constant = min_row_dim*min_row_dim/max_thermal_conduct_diff;
cout << endl << endl << "The Thermal Time Constant for the Vertical Dimension is " << thermal_time_constant << " Years" << endl;
cout << "Enter Time Duration for Calculation in Years: ";
while(!(cin >> run_time) || run_time <= 0) {
clear_cin();
cout << "Incorrect input, enter a number greater than 0: ";
}
//Asks the user for the number of loops to perform between screen updates
cout << endl << endl << "Enter the Number of Loops Between Screen Updates: ";
while(!(cin >> num_loops) || num_loops <= 0) {
clear_cin();
cout << "Incorrect input, enter a number greater than 0: ";
}
use_tolerance = 0;
/*
cout << endl << endl << "To have the simulation stop once the temperature change meets a tolerance, Enter 1 otherwise 0: ";
while(!(cin >> use_tolerance) || use_tolerance < 0 || use_tolerance > 1) {
clear_cin();
cout << "Incorrect Input, Enter 1 to use a tolerance, Else 0: ";
}
if(use_tolerance == 1) {
cout << endl << "Enter the tolerance: ";
while(!(cin >> tolerance) || tolerance <= 0) {
clear_cin();
cout << "Incorrect input, enter a number greater than 0: ";
}
}
*/
//Initializes the simulation time to 0.0
sim_time = 0.0;
//Waits for the user to hit enter before beginning the simulation
cout << endl;
cin.ignore(numeric_limits <streamsize> ::max(), '\n' );
PressEnterToContinue();
/**
* The main loop of the simulation
*/
count = 0; //Number of loops performed
cout << endl << endl << num_loops << " loops between screen updates" << endl << endl;
if(use_tolerance == 0) {
cout << setw(15) << "num loops" << setw(20) << "run time (years)" << setw(20) << "sim time (years)" << endl;
}
else {
cout << setw(15) << "num loops" << setw(20) << "run time (years)" << setw(20) << "sim time (years)" << setw(20) << "Max temp diff" << endl;
}
#ifdef DISPLAY
if(display_mode == 1) {
array_minmax();
array_size = num_cols * num_rows;
color_field = new float[array_size * 3];
for (int i=0; i<array_size *3; i++) {
color_field[i] = 0.0;
}
glutInit(&argc, argv);
int windowWidth = glutGet(GLUT_SCREEN_WIDTH);
int windowHeight = glutGet(GLUT_SCREEN_HEIGHT);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowSize(windowWidth, windowHeight);
glutInitWindowPosition(0, 0);
glutCreateWindow("ARC Simulation");
glViewport(0, 0, windowWidth,windowHeight);
glEnable (GL_BLEND);
glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60, 1.77777f, 1.0, 20000.0);
glutDisplayFunc(display3D);
//glutMouseFunc(mouse_button);//Mouse motion and camera trans settings maintained for debugging
//glutMotionFunc(mouse_move);
glutKeyboardFunc(keyboard);
glutSpecialFunc(keyboardSpecial);
/*camera_trans[0] = -num_cols/2.0;
camera_trans[1] = num_rows/3.0;
camera_trans[2] = -num_rows*1.75*tan(28.0/180.0*M_PI);
camera_rot[0] = 28.0;
camera_trans_lag[0] = -num_cols/2.0;
camera_trans_lag[1] = num_rows/3.0;
camera_trans_lag[2] = -num_rows*1.75*tan(28.0/180.0*M_PI);
camera_rot_lag[0] = 28.0;
*/
//gluLookAt(num_cols/2.0,num_rows*0.1,num_rows,num_cols/2.0,-num_rows/3.0,0.0,0.0,1.0,0.0);
if(num_rows > num_cols) {
gluLookAt(num_cols/2.0,num_rows*0.1,num_rows,num_cols/2.0,-num_rows/3.0,0.0,0.0,1.0,0.0);
}
else {
gluLookAt(num_cols/2.0,num_rows*0.1,num_cols,num_cols/2.0,-num_rows/3.0,0.0,0.0,1.0,0.0);
}
glutMainLoop();
PressEnterToContinue();
}
else {
#endif
while(sim_time <= run_time) {
//Displays status information for the current loop
if(count%num_loops == 0) {
if(use_tolerance == 0) {
cout << setw(15) << count << setw(20) << fixed << setprecision(5) << sim_time << setw(20) << initial_time + sim_time << endl;
}
else {
cout << setw(15) << count << setw(20) << fixed << setprecision(5) << sim_time << setw(20) << initial_time + sim_time << setw(20) << max_temp_diff << endl;
}
//Saves the current state of the simulation if the save_state flag is set
if(save_state) {
save_model_state();
}
}
//Performs convection updates if the current simulation is using convection
if(using_convection) {
convection_cuda();
}
//Performs conduction calculations
conduction_cuda();
//Increments the simulation time and loop count
sim_time += time_step;
count++;
if(use_tolerance == 1) {
max_temp_diff = find_max_temp_diff();
if(max_temp_diff < tolerance) {
cout << "Maximum temperature change below the tolerance, stoping the simulation" << endl;
break;
}
}
//Updates the moving source
if(using_moving_source == 1) {
update_moving_sources();
}
}
//Saves the final result of the simulation
if(save_state == 1 || save_result == 1) {
save_model_state();
}
save_surfer();
//Waits for the user to hit enter before ending the simulation
cout << endl << "Simulation Complete" << endl;
PressEnterToContinue();
deallocate_memory();
deallocate_cuda_memory();
#ifdef DISPLAY
}
#endif
}
|
3c491559cc22e16c2478a0da64c79e13d42301a9.cu
|
/**
* Performs a finite difference heat flow
* simulation using conduction and convection.
* Uses CUDA to perform calculations on a GPGPU
*/
#define _USE_MATH_DEFINES
#ifdef _WIN32
#define NOMINMAX //FYI need to disable min/max macro in windows.h
#include <windows.h>
#endif
#ifdef DISPLAY
#ifdef __APPLE__
# include <OpenGL/gl.h>
# include <OpenGL/glu.h>
# include <GLUT/glut.h>
#else
# include <GL/GL.h>
# include <GL/GLU.h>
# include <GL/glut.h>
#endif
#endif
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <iomanip>
#include <limits>
#include <cmath>
/**
 * Seconds per year (~3.15e7) divided by the product of density and heat capacity.
 * With a density of 2200 kg/m^3 and a heat capacity of 1000 J/(kg K),
 * QFAC = 3.15e7 / (2200 * 1000) ~= 14.33.
 */
#define QFAC 14.33 //Description is defined in the previous comment
#define DTC 0.25 //
#define OUT_PRECISION 10 //Number of digits to print after the decimal place for floating point values
#define INDEX_WIDTH 2 //The number of characters to print and read for each conduction and convection code
#define REAL double //The precision of the model.
#define FREEMEM 100000000 //Amount of memory to leave free on the GPU in bytes
using std::cerr;
using std::cin;
using std::cout;
using std::endl;
using std::string;
using std::ofstream;
using std::ifstream;
using std::ostringstream;
using std::setw;
using std::right;
using std::left;
using std::fixed;
using std::scientific;
using std::setprecision;
using std::setfill;
using std::ios;
using std::numeric_limits;
using std::streamsize;
using std::max;
using std::flush;
void save_surfer();
void save_model_state();
void conduction();
void convection();
void PressEnterToContinue();
REAL find_max_temp_diff();
void update_moving_sources();
void find_loc_index(REAL x_loc, REAL y_loc, REAL z_loc, int *index);
//Cuda specific variables
dim3 dimBlock; //Block dimensions for the kernel call
dim3 dimGrid; //Grid dimensions for the kernel call
cudaError error; //CUDA error variable
cudaDeviceProp deviceProp; //CUDA device properties
//Conduction code specific variables
int *cond_codes; //The unmodified conduction codes as read from the input file
int *cond_hp_index; //The conduction index for the radioactive heat production array
int *cond_tc_index; //The conduction index for the thermal conductivity array
int *use_cond; //Flag to indicate if conduction occurs for a given cell
REAL DHF; //
//Device specific conduction variables
int *dev_cond_codes; //The unmodified conduction codes as read from the input file
int *dev_cond_hp_index; //The conduction index for the radioactive heat production array
int *dev_cond_tc_index; //The conduction index for the thermal conductivity array
int *dev_use_cond;
//Convection code specific variables
int *conv_codes; //The convection codes as read from the input file
int *conv_min_temp_index; //The convection index for the minimum temp for convection array
int *conv_direction; //The direction of convection following the direction matrix in the previous comment
int *conv_vel_index; //The convection index for the velocity array
int *conv_fluid_index; //The convection index for the fluid heat capacity array
int *conv_rock_index; //The convection index for the rock heat capacity array
int num_conv_loops; //The number of convection updates to perform per time step
REAL time_inc; //The amount of time increment per convection loop
//Device specific convection variables
int *dev_conv_codes; //The convection codes as read from the input file
int *dev_conv_min_temp_index; //The convection index for the minimum temp for convection array
int *dev_conv_direction; //The direction of convection following the direction matrix in the previous comment
int *dev_conv_vel_index; //The convection index for the velocity array
int *dev_conv_fluid_index; //The convection index for the fluid heat capacity array
int *dev_conv_rock_index; //The convection index for the rock heat capacity array
//File names
string source_filename; //The input files name with extension
string output_filename; //The output state files name with extension
string output_su_filename; //The output surfer files name with extension
//Input file variables
string title; //The title of the input file
int using_convection = -1; //Indicates if convection is being used
REAL *temp; //The current temperature array
int num_rows; //The number of rows for the simulation
int num_cols; //The number of columns for the simulation
int num_slices; //Total number of slices to form the 3d simulation (one 'slice' has dimension rows x columns)
REAL *dim_x; //The dimensions of each column in the x direction
REAL *dim_y; //The dimensions of each row in the y direction
REAL *dim_z; //The dimensions of each slice in the z direction
REAL *dist_x; //The distance from the origin to the center of a column for a given column index in the x direction
REAL *dist_y; //The distance from the origin to the center of a row for a given row index in the y direction
REAL *dist_z; //The distance from the origin to the center of a slice for a given slice index in the z direction
REAL max_dist_x; //The maximum x distance
REAL max_dist_y; //The maximum y distance
REAL max_dist_z; //The maximum z distance
REAL chf; //Constant Heat flow at base of model in mW M^2
REAL initial_time; //The initial starting time of the model
int num_hp; //The number of heat production values
int num_tcd; //The number of thermal conductivity difference values
int num_hcf; //The number of fluid heat capacity values
int num_hcr; //The number of rock heat capacity values
int num_mtc; //The number of minimum convection temperature values
int num_vel; //The number of convection velocities
REAL *heat_production_values; //The radioactive heat production values array used in conduction calculations
REAL *thermal_conduct_diff; //The thermal conductivity difference array used in conduction calculations
REAL *heat_capac_fluid; //The fluid heat capacity array used in convection calculations
REAL *heat_capac_rock; //The rock heat capacity array used in convection calculations
REAL *min_temp_conv; //The minimum temperature required for convection
REAL *vel; //The velocity array used for convection calculations
//Device specific variables
REAL *dev_temp; //The current temperature array
REAL *dev_dim_x; //The dimensions of each column in the x direction
REAL *dev_dim_y; //The dimensions of each row in the y direction
REAL *dev_dim_z; //The dimensions of each slice in the z direction
REAL *dev_dist_x; //The distance from the origin to the center of a column in the x direction
REAL *dev_dist_y; //The distance from the origin to the center of a row in the y direction
REAL *dev_dist_z; //The distance from the origin to the center of a slice in the z direction
REAL *dev_heat_production_values; //The radioactive heat production values array used in conduction calculations
REAL *dev_thermal_conduct_diff; //The thermal conductivity difference array used in conduction calculations
REAL *dev_heat_capac_fluid; //The fluid heat capacity array used in convection calculations
REAL *dev_heat_capac_rock; //The rock heat capacity array used in convection calculations
REAL *dev_min_temp_conv; //The minimum temperature required for convection
REAL *dev_vel; //The velocity array used for convection calculations
//Moving source variables
int using_moving_source = -1; //Indicates if a moving source is being used
int num_mvsrc = -1; //The number of moving sources
REAL *mvsrc_x; //The x location of the moving sources
REAL *mvsrc_y; //The y location of the moving sources
REAL *mvsrc_z; //The z location of the moving sources
REAL *mvsrc_offset_x; //The size of the moving sources in the x direction
REAL *mvsrc_offset_y; //The size of the moving sources in the y direction
REAL *mvsrc_offset_z; //The size of the moving sources in the z direction
REAL *mvsrc_vel_x; //The x component of the moving sources velocity vectors
REAL *mvsrc_vel_y; //The y component of the moving sources velocity vectors
REAL *mvsrc_vel_z; //The z component of the moving sources velocity vectors
REAL *mvsrc_accel_x; //The x component of the moving sources acceleration vectors
REAL *mvsrc_accel_y; //The y component of the moving sources acceleration vectors
REAL *mvsrc_accel_z; //The z component of the moving sources acceleration vectors
REAL *mvsrc_temp; //The temperature of the moving sources
int *mvsrc_valid; //Indicates if a moving source is valid
//Time specific variables
REAL sim_time; //The current time of the simulation
REAL tic; //Time variable used in convection calculations
REAL time_step = -1; //The amount of time that passes between each update of the simulation, time step
REAL run_time = -1; //The run time of the simulation
//Global variables
int save_state = -1; //Indicates if the model should save the current state at each screen update
int save_result = -1; //Indicates if the model should save the final result of the simulaiton
int use_tolerance = -1; //Indicates if the model should stop once a user specified tolerance is met for temperature change
REAL max_vel; //The maximum convection velocity of the velocity array
REAL min_row_dim; //The minimum y dimension of each cell of the simulation
REAL min_col_dim; //The minimum x dimension of each cell of the simulation
REAL min_slice_dim; // The minimum z dimension of each cell in the simulation
REAL thermal_time_constant; //The thermal time constant of the model, used in the selection of the run time
REAL *next_temp; //The next temperature array
REAL max_thermal_conduct_diff; //The maximum thermal conductivity difference
REAL min_thermal_conduct_diff; //The minimum thermal conductivity difference
int num_loops = -1; //The number of loops between screen updates
int su_num_width; //The number of characters for the slice number in the output surfer filenames
unsigned long long count = 0; //The current loop
REAL tolerance = -1; //The maximum difference required for the model to stop
REAL max_temp_diff; //The maximum temperature difference between the current and next temperature arrays
unsigned long long num_cells; //The number of cells in the simulation
REAL *dev_next_temp; //The next temperature array
/**
* Deallocates all allocated memory used by the program
*/
void deallocate_memory() {
//Deletes allocated memory
delete[] dim_x;
delete[] dim_y;
delete[] dim_z;
delete[] dist_x;
delete[] dist_y;
delete[] dist_z;
delete[] temp;
delete[] next_temp;
delete[] cond_codes;
delete[] cond_hp_index;
delete[] cond_tc_index;
delete[] use_cond;
delete[] heat_production_values;
delete[] thermal_conduct_diff;
if(using_convection == 1) {
delete[] conv_codes;
delete[] conv_min_temp_index;
delete[] conv_direction;
delete[] conv_vel_index;
delete[] conv_fluid_index;
delete[] conv_rock_index;
delete[] heat_capac_fluid;
delete[] heat_capac_rock;
delete[] min_temp_conv;
delete[] vel;
}
if(using_moving_source == 1) {
delete[] mvsrc_x;
delete[] mvsrc_y;
delete[] mvsrc_z;
delete[] mvsrc_offset_x;
delete[] mvsrc_offset_y;
delete[] mvsrc_offset_z;
delete[] mvsrc_vel_x;
delete[] mvsrc_vel_y;
delete[] mvsrc_vel_z;
delete[] mvsrc_accel_x;
delete[] mvsrc_accel_y;
delete[] mvsrc_accel_z;
delete[] mvsrc_temp;
delete[] mvsrc_valid;
}
}
void deallocate_cuda_memory() {
cudaFree(dev_temp);
cudaFree(dev_next_temp);
cudaFree(dev_dim_x);
cudaFree(dev_dim_y);
cudaFree(dev_dim_z);
cudaFree(dev_dist_x);
cudaFree(dev_dist_y);
cudaFree(dev_dist_z);
cudaFree(dev_cond_codes);
cudaFree(dev_cond_hp_index);
cudaFree(dev_cond_tc_index);
cudaFree(dev_use_cond);
cudaFree(dev_heat_production_values);
cudaFree(dev_thermal_conduct_diff);
if(using_convection) {
cudaFree(dev_heat_capac_fluid);
cudaFree(dev_heat_capac_rock);
cudaFree(dev_min_temp_conv);
cudaFree(dev_vel);
cudaFree(dev_conv_codes);
cudaFree(dev_conv_min_temp_index);
cudaFree(dev_conv_direction);
cudaFree(dev_conv_vel_index);
cudaFree(dev_conv_fluid_index);
cudaFree(dev_conv_rock_index);
}
}
#ifdef DISPLAY
//Display variables
int display_mode = -1;
int array_size;
REAL min_temp;
REAL max_temp;
REAL layer_min_temp;
REAL layer_max_temp;
float *color_field;
float transparency = 1.0f;
int current_slice = 0;
/**
* Parameters to control the camera angle so we can move where we're looking at
* the simulation from with the mouse. Kept for debugging.
*/
/*
int ox = 0;
int oy = 0;
int buttonState = 0;
float camera_trans[] = {0, -0.2, -10};
float camera_rot[] = {0, 0, 0};
float camera_trans_lag[] = {0, -0.2, -10};
float camera_rot_lag[] = {0, 0, 0};
const float inertia = 0.1f;
*/
/*
* Sets max and min temperature values for simulation
*/
void array_minmax() {
min_temp=temp[0];
max_temp=temp[0];
for(int k=0; k<num_slices; k++) {
for (int i=0; i<num_rows; i++) {
for(int j=0; j<num_cols; j++) {
if(temp[k*num_rows*num_cols + i*num_cols + j]<min_temp)
min_temp = temp[k*num_rows*num_cols + i*num_cols + j];
if(temp[k*num_rows*num_cols + i*num_cols + j]>max_temp)
max_temp = temp[k*num_rows*num_cols + i*num_cols + j];
}
}
}
}
/**
* Updates max_temp with the maximum temp of
* the current slice.
*/
void array_max() {
max_temp=temp[current_slice*num_rows*num_cols];
for(int i = 0; i < num_rows; i++) {
for(int j = 0; j < num_cols; j++) {
if(temp[current_slice*num_rows*num_cols + i*num_cols + j] > max_temp) {
max_temp = temp[current_slice*num_rows*num_cols + i*num_cols + j];
}
}
}
}
/*
* 3D to 1D indexing
*/
static int POSITION(int x, int y) {
return (x*num_cols)+y;
}
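//Illustrative example (assumed value): with num_cols = 4, POSITION(2,3) = 2*4 + 3 = 11.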
/*
* Colormap algorithm reproduces Matlab's RGB "Jet" plate
* Concept based on: http://paulbourke.net/texture_colour/colourspace/ (11/21/12)
*/
void jet_color_set(int x, int y, int z) {
REAL current_temp = temp[z*num_rows*num_cols + x*num_cols + y];
REAL delta_temp = max_temp - min_temp;
if(current_temp < min_temp)
current_temp = min_temp;
if(current_temp > max_temp)
current_temp = max_temp;
if(current_temp < (min_temp + 0.25 * delta_temp)) {
color_field[POSITION(x,y) * 3] = (GLfloat)0.0;
color_field[POSITION(x,y) * 3 + 1] = (GLfloat)(4*(current_temp - min_temp)/delta_temp);
color_field[POSITION(x,y) * 3 + 2] = (GLfloat)1.0;
}
else if(current_temp < (min_temp + 0.5 * delta_temp)) {
color_field[POSITION(x,y) * 3] = (GLfloat)0.0;
color_field[POSITION(x,y) * 3 + 1] = (GLfloat)1.0;
color_field[POSITION(x,y) * 3 + 2] = (GLfloat)(1.0 + 4 * (min_temp + 0.25 * delta_temp - current_temp) / delta_temp);
}
else if(current_temp < (min_temp + 0.75 * delta_temp)) {
color_field[POSITION(x,y) * 3] = (GLfloat)(4 * (current_temp - min_temp - 0.5 * delta_temp) / delta_temp);
color_field[POSITION(x,y) * 3 + 1] = (GLfloat)1.0;
color_field[POSITION(x,y) * 3 + 2] = (GLfloat)0.0;
}
else {
color_field[POSITION(x,y) * 3] = (GLfloat)1.0;
color_field[POSITION(x,y) * 3 + 1] = (GLfloat)(1.0 + 4 * (min_temp + 0.75 * delta_temp - current_temp) / delta_temp);
color_field[POSITION(x,y) * 3 + 2] = (GLfloat)0.0;
}
}
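//For example, at current_temp = min_temp + 0.5*delta_temp the mapping above yields
//pure green (0,1,0); at min_temp it yields blue (0,0,1) and at max_temp it yields red (1,0,0).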
/*
* Draw temp surface via 1x1 faces. Dimensions constant for simplicity.
* Originally drew cubes from Robert Bergmans voxel display code.
*/
void draw_cube(int x, int y, int z) {
if(z == current_slice) {
transparency = 1.0f;
}
else {
transparency = 0.3f;
}
glBegin(GL_TRIANGLES);
//front
glColor4f( color_field[POSITION(x,y) * 3],
color_field[POSITION(x,y) * 3 + 1],
color_field[POSITION(x,y) * 3 + 2],
transparency);
glVertex3f(0.0f,0.0f,1.0f);//5
glVertex3f(1.0f,0.0f,1.0f);//6
glVertex3f(0.0f,-1.0f,1.0f);//8
glColor4f( color_field[POSITION(x,y) * 3],
color_field[POSITION(x,y) * 3 + 1],
color_field[POSITION(x,y) * 3 + 2],
transparency);
glVertex3f(0.0f,-1.0f,1.0f);//8
glVertex3f(1.0f,0.0f,1.0f);//6
glVertex3f(1.0f,-1.0f,1.0f);//7
//top
glColor4f( color_field[POSITION(x,y) * 3],
color_field[POSITION(x,y) * 3 + 1],
color_field[POSITION(x,y) * 3 + 2],
transparency);
glVertex3f(0.0f,0.0f,0.0f);//1
glVertex3f(1.0f,0.0f,0.0f);//2
glVertex3f(0.0f,0.0f,1.0f);//5
glColor4f( color_field[POSITION(x,y) * 3],
color_field[POSITION(x,y) * 3 + 1],
color_field[POSITION(x,y) * 3 + 2],
transparency);
glVertex3f(0.0f,0.0f,1.0f);//5
glVertex3f(1.0f,0.0f,0.0f);//2
glVertex3f(1.0f,0.0f,1.0f);//6
//QUADS code left in case we need it later
/*
glBegin(GL_QUADS);
//front
glColor4f( color_field[POSITION(x,y) * 3],
color_field[POSITION(x,y) * 3 + 1],
color_field[POSITION(x,y) * 3 + 2],
transparency);
glVertex3f(0.0f,0.0f,1.0f);//5
glVertex3f(1.0f,0.0f,1.0f);//6
glVertex3f(1.0f,-1.0f,1.0f);//7
glVertex3f(0.0f,-1.0f,1.0f);//8
//top
glColor4f( color_field[POSITION(x,y) * 3],
color_field[POSITION(x,y) * 3 + 1],
color_field[POSITION(x,y) * 3 + 2],
transparency);
glVertex3f(0.0f,0.0f,0.0f);//1
glVertex3f(1.0f,0.0f,0.0f);//2
glVertex3f(1.0f,0.0f,1.0f);//6
glVertex3f(0.0f,0.0f,1.0f);//5
*//*//left
glColor4f( color_field[POSITION(x,y) * 3],
color_field[POSITION(x,y) * 3 + 1],
color_field[POSITION(x,y) * 3 + 2],
transparency);
glVertex3f(0.0f,0.0f,0.0f);//1
glVertex3f(0.0f,0.0f,1.0f);//5
glVertex3f(0.0f,-1.0f,1.0f);//8
glVertex3f(0.0f,-1.0f,0.0f);//4
//right
glColor4f( color_field[POSITION(x,y) * 3],
color_field[POSITION(x,y) * 3 + 1],
color_field[POSITION(x,y) * 3 + 2],
transparency);
glVertex3f(1.0f,0.0f,0.0f);//2
glVertex3f(1.0f,0.0f,1.0f);//6
glVertex3f(1.0f,-1.0f,1.0f);//7
glVertex3f(1.0f,-1.0f,0.0f);//3
//bottom
glColor4f( color_field[POSITION(x,y) * 3],
color_field[POSITION(x,y) * 3 + 1],
color_field[POSITION(x,y) * 3 + 2],
transparency);
glVertex3f(0.0f,-1.0f,0.0f);//4
glVertex3f(1.0f,-1.0f,0.0f);//3
glVertex3f(1.0f,-1.0f,1.0f);//7
glVertex3f(0.0f,-1.0f,1.0f);//8
//back
glColor4f( color_field[POSITION(x,y) * 3],
color_field[POSITION(x,y) * 3 + 1],
color_field[POSITION(x,y) * 3 + 2],
transparency);
glVertex3f(0.0f,0.0f,0.0f);//1
glVertex3f(1.0f,0.0f,0.0f);//2
glVertex3f(1.0f,-1.0f,0.0f);//3
glVertex3f(0.0f,-1.0f,0.0f);//4
*/
glEnd();
}
/*
*Draw all HUD/Overlay graphics. Quads for temp scale are hardcoded to Jet color map. Any new color map will require changes.
*/
void displayOverlay(){
int windowWidth = glutGet(GLUT_WINDOW_WIDTH);
int windowHeight = glutGet(GLUT_WINDOW_HEIGHT);
glMatrixMode( GL_PROJECTION );
glPushMatrix();
glLoadIdentity();
glOrtho(0.0f,windowWidth,windowHeight,0.0f,0.0f,1.0f);
glMatrixMode( GL_MODELVIEW );
glPushMatrix();
glLoadIdentity();
glBegin( GL_QUADS );
glColor3f( 0.0f, 0.0f, 1.0f );
glVertex2f( (GLfloat)(windowWidth/2-75), 20.0f );
glVertex2f( (GLfloat)(windowWidth/2-45), 20.0f );
glVertex2f( (GLfloat)(windowWidth/2-45), 50.0f );
glVertex2f( (GLfloat)(windowWidth/2-75), 50.0f );
glColor3f( 0.0f, 1.0f, 1.0f );
glVertex2f( (GLfloat)(windowWidth/2-45), 20.0f );
glVertex2f( (GLfloat)(windowWidth/2-15), 20.0f );
glVertex2f( (GLfloat)(windowWidth/2-15), 50.0f );
glVertex2f( (GLfloat)(windowWidth/2-45), 50.0f );
glColor3f( 0.0f, 1.0f, 0.0f );
glVertex2f( (GLfloat)(windowWidth/2-15), 20.0f );
glVertex2f( (GLfloat)(windowWidth/2+15), 20.0f );
glVertex2f( (GLfloat)(windowWidth/2+15), 50.0f );
glVertex2f( (GLfloat)(windowWidth/2-15), 50.0f );
glColor3f( 1.0f, 1.0f, 0.0f );
glVertex2f( (GLfloat)(windowWidth/2+15), 20.0f );
glVertex2f( (GLfloat)(windowWidth/2+45), 20.0f );
glVertex2f( (GLfloat)(windowWidth/2+45), 50.0f );
glVertex2f( (GLfloat)(windowWidth/2+15), 50.0f );
glColor3f( 1.0f, 0.0f, 0.0f );
glVertex2f( (GLfloat)(windowWidth/2+45), 20.0f );
glVertex2f( (GLfloat)(windowWidth/2+75), 20.0f );
glVertex2f( (GLfloat)(windowWidth/2+75), 50.0f );
glVertex2f( (GLfloat)(windowWidth/2+45), 50.0f );
glEnd();
glPopMatrix();
glPushMatrix();
glLoadIdentity();
ostringstream str1;
str1 << "Min Temp < > Max Temp";
glColor3f(1.0f, 1.0f, 1.0f);
glRasterPos2f((GLfloat)(windowWidth/2-150),35.0f);
glutBitmapString(GLUT_BITMAP_HELVETICA_12, (const unsigned char *)str1.str().c_str());
str1.str("");
str1.clear();
str1 << setw(4) << min_temp;
glRasterPos2f((GLfloat)(windowWidth/2-100), 65.0f);
glutBitmapString(GLUT_BITMAP_HELVETICA_12, (const unsigned char *)str1.str().c_str());
str1.str("");
str1.clear();
str1 << setw(4) << (max_temp+min_temp)/2;
glRasterPos2f((GLfloat)(windowWidth/2-20), 65.0f);
glutBitmapString(GLUT_BITMAP_HELVETICA_12, (const unsigned char *)str1.str().c_str());
str1.str("");
str1.clear();
str1 << setw(4) << max_temp;
glRasterPos2f((GLfloat)(windowWidth/2+60), 65.0f);
glutBitmapString(GLUT_BITMAP_HELVETICA_12, (const unsigned char *)str1.str().c_str());
str1.str("");
str1.clear();
glPopMatrix();
glPushMatrix();
glLoadIdentity();
ostringstream str;
str << "Time Interval:" << endl;
if(using_convection) {
str << "Conv. Time Interval:" << endl;
str << "Num Conv. Loops:" << endl;
}
str << endl << "Loop Total:" << endl;
str << "Sim Time:" << endl;
str << "Cum. Sim Time:" << endl << endl;
str << "Model Dimensions:" << endl;
str << "Current Slice:" << endl;
str << "CHF:" << endl;
glColor3f(1.0f, 1.0f, 1.0f);
glRasterPos2f(10.0f,(GLfloat)(windowHeight*3.0/4.0));
glutBitmapString(GLUT_BITMAP_HELVETICA_12, (const unsigned char *)str.str().c_str());
str.str("");
str.clear();
str << setprecision(4) << scientific;
str << time_step << endl;
if(using_convection) {
str << time_inc << endl;
str << num_conv_loops << endl;
}
str << endl << count << endl;
str << sim_time << endl;
str << initial_time + sim_time << endl << endl;
str << num_rows << " X " << num_cols << " X " << num_slices << endl;
str << current_slice+1 << endl;
str << chf << endl;
glColor3f(1.0f, 1.0f, 1.0f);
glRasterPos2f(150.0f,(GLfloat)(windowHeight*3.0/4.0));
glutBitmapString(GLUT_BITMAP_HELVETICA_12, (const unsigned char *)str.str().c_str());
glPopMatrix();
glMatrixMode( GL_PROJECTION );
glPopMatrix();
}
/*
* Helper function called from display3d. Broken out for readability.
*/
void display_helper() {
array_max();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// Handle the camera angle. Maintained for debugging
/*
for (int c = 0; c < 3; ++c)
{
camera_trans_lag[c] += (camera_trans[c] - camera_trans_lag[c]) * inertia;
camera_rot_lag[c] += (camera_rot[c] - camera_rot_lag[c]) * inertia;
}
glTranslatef(camera_trans_lag[0], camera_trans_lag[1], camera_trans_lag[2]);
glRotatef(camera_rot_lag[0], 1.0, 0.0, 0.0);
glRotatef(camera_rot_lag[1], 0.0, 1.0, 0.0);
*/
//Draw the boundary lines
glBegin(GL_LINES);
glColor3f(1.0, 1.0, 1.0);
glVertex3f(0.0f,0.0f,0.0f);
glVertex3f((GLfloat)num_cols,0.0f,0.0f);
glVertex3f((GLfloat)num_cols,0.0f,0.0f);
glVertex3f((GLfloat)num_cols,(GLfloat)-num_rows,0.0f);
glVertex3f((GLfloat)num_cols,(GLfloat)-num_rows,0.0f);
glVertex3f(0.0f,(GLfloat)-num_rows,0.0f);
glVertex3f(0.0f,(GLfloat)-num_rows,0.0f);
glVertex3f(0.0f,0.0f,0.0f);
glVertex3f(0.0f,0.0f,(GLfloat)num_slices);
glVertex3f((GLfloat)num_cols,0.0f,(GLfloat)num_slices);
glVertex3f((GLfloat)num_cols,0.0f,(GLfloat)num_slices);
glVertex3f((GLfloat)num_cols,(GLfloat)-num_rows,(GLfloat)num_slices);
glVertex3f((GLfloat)num_cols,(GLfloat)-num_rows,(GLfloat)num_slices);
glVertex3f(0.0f,(GLfloat)-num_rows,(GLfloat)num_slices);
glVertex3f(0.0f,(GLfloat)-num_rows,(GLfloat)num_slices);
glVertex3f(0.0f,0.0f,(GLfloat)num_slices);
glVertex3f(0.0f,0.0f,0.0f);
glVertex3f(0.0f,0.0f,(GLfloat)num_slices);
glVertex3f((GLfloat)num_cols,0.0f,0.0f);
glVertex3f((GLfloat)num_cols,0.0f,(GLfloat)num_slices);
glVertex3f((GLfloat)num_cols,(GLfloat)-num_rows,0.0f);
glVertex3f((GLfloat)num_cols,(GLfloat)-num_rows,(GLfloat)num_slices);
glVertex3f(0.0f,(GLfloat)-num_rows,0.0f);
glVertex3f(0.0f,(GLfloat)-num_rows,(GLfloat)num_slices);
glEnd();
for(int i=0; i<num_rows; i++) {
for (int j=0; j<num_cols; j++) {
glPushMatrix();
glTranslatef((GLfloat)j,(GLfloat)-i,(GLfloat)current_slice);
jet_color_set(i,j,current_slice);
draw_cube(i,j,current_slice);
glPopMatrix();
}
}
displayOverlay();
glutSwapBuffers();
glutPostRedisplay();
}
/*
* Main simulation call during display. Makes required simulation calls e.g.- for convection, etc.
* Makes call to display helper function
*/
void display3D() {
//Displays status information for the current loop
if(count%num_loops == 0) {
if(use_tolerance == 0) {
cout << setw(15) << count << setw(20) << fixed << setprecision(5) << sim_time << setw(20) << initial_time + sim_time << endl;
}
else {
cout << setw(15) << count << setw(20) << fixed << setprecision(5) << sim_time << setw(20) << initial_time + sim_time << setw(20) << max_temp_diff << endl;
}
//Saves the current state of the simulation if the save_state flag is set
if(save_state) {
save_model_state();
}
display_helper();
}
if(sim_time <= run_time) {
//Performs convection updates if the current simulation is using convection
if(using_convection) {
convection();
}
//Performs conduction calculations
conduction();
//Increments the simulation time and loop count
sim_time += time_step;
count++;
if(use_tolerance == 1) {
max_temp_diff = find_max_temp_diff();
if(max_temp_diff < tolerance) {
cout << "Maximum temperature change below the tolerance, stoping the simulation" << endl;
//Saves the final result of the simulation
if(save_state == 1 || save_result == 1) {
save_model_state();
}
save_surfer();
cout << endl << "Simulation Complete" << endl;
delete[] color_field;
deallocate_memory();
glutLeaveMainLoop();
}
}
//Updates the moving source
if(using_moving_source == 1) {
update_moving_sources();
}
}
else {
//Saves the final result of the simulation
if(save_state == 1 || save_result == 1) {
save_model_state();
}
save_surfer();
cout << endl << "Simulation Complete" << endl;
delete[] color_field;
deallocate_memory();
glutLeaveMainLoop();
}
glutPostRedisplay();
}
/*
* This captures information when the mouse buttons are pressed.
* Maintained for debugging.
*/
/*
void mouse_button(int button, int state, int x, int y) {
int mods;
if (state == GLUT_DOWN)
buttonState |= 1<<button;
else if (state == GLUT_UP)
buttonState = 0;
mods = glutGetModifiers();
if (mods & GLUT_ACTIVE_SHIFT)
{
buttonState = 2;
}
else if (mods & GLUT_ACTIVE_CTRL)
{
buttonState = 3;
}
ox = x; oy = y;
glutPostRedisplay();
}
*/
/*
* This captures mouse motion information.
* Maintained for debugging
*/
/*
void mouse_move(int x, int y) {
float dx = (float)(x - ox);
float dy = (float)(y - oy);
if (buttonState == 3)
{
// left+middle = zoom
camera_trans[2] += (dy / 100.0f) * 0.5f * fabs(camera_trans[2]);
}
else if (buttonState & 2)
{
// middle = translate
camera_trans[0] += dx / 10.0f;
camera_trans[1] -= dy / 10.0f;
}
else if (buttonState & 1)
{
// left = rotate
camera_rot[0] += dy / 5.0f;
camera_rot[1] += dx / 5.0f;
}
ox = x; oy = y;
glutPostRedisplay();
}
*/
/*
*Standard keyboard character control
*/
void keyboard(unsigned char key, int x, int y) {
switch(key) {
case '-':
if(current_slice > 0) {
current_slice--;
//camera_trans[1]-=0.5;//For camera mouse control, above
//camera_trans[2]+=1.0;
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60, 1.77777f, 1.0, 20000.0);
//gluLookAt(num_cols/2.0,num_rows*0.1,num_rows+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
if(num_rows > num_cols) {
gluLookAt(num_cols/2.0,num_rows*0.1,num_rows+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
}
else {
gluLookAt(num_cols/2.0,num_rows*0.1,num_cols+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
}
}
break;
case '+':
if(current_slice < num_slices-1) {
current_slice++;
//camera_trans[1]+=0.5;//For camera mouse control, above
//camera_trans[2]-=1.0;
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60, 1.77777f, 1.0, 20000.0);
//gluLookAt(num_cols/2.0,num_rows*0.1,num_rows+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
if(num_rows > num_cols) {
gluLookAt(num_cols/2.0,num_rows*0.1,num_rows+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
}
else {
gluLookAt(num_cols/2.0,num_rows*0.1,num_cols+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
}
}
break;
case 'x':
exit(0);
default:
break;
}
display_helper();
}
/*
* Special keyboard control for arrows
*/
void keyboardSpecial(int key, int x, int y) {
switch(key) {
case GLUT_KEY_UP:
if(current_slice > 0) {
current_slice--;
//camera_trans[1]-=0.5;
//camera_trans[2]+=1.0;
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60, 1.77777f, 1.0, 20000.0);
//gluLookAt(num_cols/2.0,num_rows*0.1,num_rows+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
if(num_rows > num_cols) {
gluLookAt(num_cols/2.0,num_rows*0.1,num_rows+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
}
else {
gluLookAt(num_cols/2.0,num_rows*0.1,num_cols+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
}
}
break;
case GLUT_KEY_DOWN:
if(current_slice < num_slices-1) {
current_slice++;
//camera_trans[1]+=0.5;
//camera_trans[2]-=1.0;
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60, 1.77777f, 1.0, 20000.0);
//gluLookAt(num_cols/2.0,num_rows*0.1,num_rows+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
if(num_rows > num_cols) {
gluLookAt(num_cols/2.0,num_rows*0.1,num_rows+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
}
else {
gluLookAt(num_cols/2.0,num_rows*0.1,num_cols+current_slice,num_cols/2.0,-num_rows/3.0,current_slice,0.0,1.0,0.0);
}
}
break;
default:
break;
}
display_helper();
}
#endif
/**
* Clears the cin buffer
*/
void clear_cin() {
cin.clear();
cin.ignore(numeric_limits <streamsize> ::max(), '\n' );
}
/**
* This function waits for the user to hit enter before continuing
*/
void PressEnterToContinue() {
cout << "Press ENTER to continue... " << flush;
clear_cin();
}
/**
* Swaps the temp arrays
*/
void swap_temp_array() {
REAL *tmp;
tmp = temp;
temp = next_temp;
next_temp = tmp;
}
void swap_temp_array_cuda() {
REAL *tmp;
tmp = dev_temp;
dev_temp = dev_next_temp;
dev_next_temp = tmp;
}
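//Swapping the device pointers avoids copying the whole temperature field between
//iterations; only the two pointer values change, not the data on the GPU.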
/**
* Loads the input file into program memory and allocates
* necessary memory to store the input variables
*/
void load_file() {
ifstream source_file; //Input file stream
string temp_str;
ostringstream str_conv;
//Ask for the input file names and displays an error message
//if the file does not exist
do {
cout << "Input File Name: ";
cin >> source_filename;
source_file.open(source_filename.c_str(),ios::in);
if(!source_file.is_open()) {
cout << "File Not found!" << endl;
}
} while(!source_file.is_open());
//Asks the user if the state of the model should be saved every screen update
cout << endl << "To save the state of the model every screen update enter 1, otherwise 0: ";
while(!(cin >> save_state) || save_state < 0 || save_state > 1) {
clear_cin();
cout << "Incorrect input, to save the state of the model enter 1, else 0: ";
}
if(save_state == 0) {
//Asks the user if the final result of the model should be saved
cout << endl << "To save the final result of the model enter 1, otherwise 0: ";
while(!(cin >> save_result) || save_result < 0 || save_result > 1) {
clear_cin();
cout << "Incorrect input, to save the final result of the model enter 1, else 0: ";
}
}
//Ask for the state filename if the user specified that the file should be saved
if(save_state == 1 || save_result == 1) {
cout << "Output filename: ";
cin >> output_filename;
}
//Asks for the DSAA surfer grid filename
cout << "Surfer filename: ";
while(!(cin >> output_su_filename) || output_su_filename.length() < 5) {
clear_cin();
cout << "Please enter a filename at least 5 characters in length: ";
}
//Loads the input file
cout << endl << endl << "Loading Input File";
//Retrieves the simulation parameters from the input file
source_file >> num_rows >> num_cols >> num_slices >> using_convection;
source_file >> chf >> initial_time;
getline(source_file,title);
getline(source_file,title);
//Calculates the number of cells in the simulation
num_cells = num_rows*num_cols*num_slices;
//Calculates the dimensions of the grid and blocks
if(num_cells <= deviceProp.maxThreadsDim[0]) {
dimBlock.x = num_cells;
dimGrid.x = 1;
}
else {
dimBlock.x = deviceProp.maxThreadsDim[0];
dimGrid.x = (int)(ceil(num_cells/(REAL)deviceProp.maxThreadsDim[0]));
}
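//Illustrative example (assumed values): with num_cells = 1,000,000 and
//deviceProp.maxThreadsDim[0] = 1024, dimBlock.x = 1024 and dimGrid.x = ceil(1e6/1024) = 977.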
//Calculates the amount of memory to be used by the program
unsigned long long total_mem_used = num_rows*num_cols*num_slices*(2*sizeof(REAL) + 4*sizeof(int)) + sizeof(REAL)*2*(num_rows+num_cols+num_slices);
if(using_convection) {
total_mem_used += num_rows*num_cols*num_slices*6*sizeof(int);
}
//Exits the program if the estimated amount of memory exceeds the amount of global memory
cout << endl << endl << "Estimated amount of memory usage: " << total_mem_used << endl;
if(total_mem_used > deviceProp.totalGlobalMem-FREEMEM) {
cout << "Simulation exceeds global memory limits of GPU, Exiting Program!" << endl;
exit(1);
}
//Checks the total and used amount of device global memory before allocation
size_t free_memory; //Free memory on the device
size_t total_memory; //Total memory on the device
error = cudaMemGetInfo(&free_memory, &total_memory); //Retrieves the memory information for the device
if(error != cudaSuccess) {
cerr << endl << "Error while getting memory information" << endl;
cerr << error << ":" << cudaGetErrorString(error) << endl;
cerr << "Exiting the Program" << endl;
exit(0);
}
cout << "Free memory: "<< (unsigned int)free_memory << ", total memory: "<< (unsigned int)total_memory<<" (before initialization)" << endl;
//displays parameters of the input file
cout << endl << endl << "Total Number of Cells = " << num_cells << endl;
cout << "Number of rows = " << num_rows << endl;
cout << "Number of cols = " << num_cols << endl;
cout << "Number of slices = " << num_slices << endl;
if(using_convection == 1) {
cout << "Using convection" << endl;
}
else {
cout << "No Convection" << endl;
}
cout << endl << "Constant Heat Flow at Base of Model = " << chf << "mW M^2" << endl;
chf *= 0.001;
cout << "Model time elapsed = " << initial_time << " Years" << endl << endl;
//Calculates the number of characters for the surfer file index
str_conv << num_slices;
su_num_width = str_conv.str().length();
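//e.g. (illustrative) num_slices = 12 gives su_num_width = 2 characters for the slice
//number used in the output surfer filenames.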
//Allocates memory for the conduction variables based on the previously read in simulation
//parameters
dim_x = new REAL[num_cols];
dim_y = new REAL[num_rows];
dim_z = new REAL[num_slices];
dist_x = new REAL[num_cols];
dist_y = new REAL[num_rows];
dist_z = new REAL[num_slices];
temp = new REAL[num_rows*num_cols];
next_temp = new REAL[num_rows*num_cols];
cond_codes = new int[num_rows*num_cols];
cond_hp_index = new int[num_rows*num_cols];
cond_tc_index = new int[num_rows*num_cols];
use_cond = new int[num_rows*num_cols];
//Allocates conduction specific variables in device memory
error = cudaMalloc((void **) &dev_dim_x,num_cols*sizeof(REAL));
error = cudaMalloc((void **) &dev_dim_y,num_rows*sizeof(REAL));
error = cudaMalloc((void **) &dev_dim_z,num_slices*sizeof(REAL));
error = cudaMalloc((void **) &dev_dist_x,num_cols*sizeof(REAL));
error = cudaMalloc((void **) &dev_dist_y,num_rows*sizeof(REAL));
error = cudaMalloc((void **) &dev_dist_z,num_slices*sizeof(REAL));
error = cudaMalloc((void **) &dev_temp,num_cols*num_rows*num_slices*sizeof(REAL));
error = cudaMalloc((void **) &dev_next_temp,num_cols*num_rows*num_slices*sizeof(REAL));
error = cudaMalloc((void **) &dev_cond_codes,num_cols*num_rows*num_slices*sizeof(int));
error = cudaMalloc((void **) &dev_cond_hp_index,num_cols*num_rows*num_slices*sizeof(int));
error = cudaMalloc((void **) &dev_cond_tc_index,num_cols*num_rows*num_slices*sizeof(int));
error = cudaMalloc((void **) &dev_use_cond,num_cols*num_rows*num_slices*sizeof(int));
if(error != cudaSuccess) {
cerr << "Unable to allocate device memory for conduction variables" << endl;
exit(1);
}
//Reads in the starting temperatures of the simulation from the input file
for (int k = 0; k < num_slices; k++) {
for(int i = 0; i < num_rows; i++) {
for(int j = 0; j < num_cols; j++) {
source_file >> temp[i*num_cols + j];
}
}
//Copies the current temperature slice to the device
error = cudaMemcpy(&dev_temp[k*num_cols*num_rows],temp,num_rows*num_cols*sizeof(REAL),cudaMemcpyHostToDevice);
if(error != cudaSuccess) {
cerr << "Unable to copy Temps to device memory" << endl;
exit(1);
}
}
cout << "Read " << num_rows << " X " << num_cols << " X " << num_slices << " temps" << endl;
//Reads in the conduction codes for each cell of the simulation and parses
//the array indexes from the codes.
//Unlike the Fortran version of the program, the conduction direction codes
//are ignored since the simulation accounts for them internally
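//Illustrative example (assumed code value): with INDEX_WIDTH = 2, a conduction code of
//"03051" parses as cond_tc_index = 3-1 = 2, cond_hp_index = 5-1 = 4 and use_cond = 1
//(the 1-based indices in the file become 0-based array indices here).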
for (int k = 0; k < num_slices; k++) {
for(int i = 0; i < num_rows; i++) {
for(int j = 0; j < num_cols; j++) {
source_file >> temp_str;
cond_codes[i*num_cols + j] = atoi(temp_str.c_str());
cond_tc_index[i*num_cols + j] = atoi(temp_str.substr(0*INDEX_WIDTH,INDEX_WIDTH).c_str())-1;
cond_hp_index[i*num_cols + j] = atoi(temp_str.substr(1*INDEX_WIDTH,INDEX_WIDTH).c_str())-1;
use_cond[i*num_cols + j] = atoi(temp_str.substr(2*INDEX_WIDTH,1).c_str());
}
}
//Copies the current conduction code slice to device memory
error = cudaMemcpy(&dev_cond_codes[k*num_rows*num_cols],cond_codes,num_rows*num_cols*sizeof(int),cudaMemcpyHostToDevice);
error = cudaMemcpy(&dev_cond_tc_index[k*num_rows*num_cols],cond_tc_index,num_rows*num_cols*sizeof(int),cudaMemcpyHostToDevice);
error = cudaMemcpy(&dev_cond_hp_index[k*num_rows*num_cols],cond_hp_index,num_rows*num_cols*sizeof(int),cudaMemcpyHostToDevice);
error = cudaMemcpy(&dev_use_cond[k*num_rows*num_cols],use_cond,num_rows*num_cols*sizeof(int),cudaMemcpyHostToDevice);
if(error != cudaSuccess) {
cerr << "Unable to copy conduction codes to device memory" << endl;
exit(1);
}
}
cout << "Read " << num_rows << " X " << num_cols << " X " << num_slices << " conduction codes" << endl;
//If convection is used for the user specified input file, memory is allocated for its
//variables and they are read in from the input file
if(using_convection) {
//Allocates memory for the convection variables based on the previously read in simulation
//parameters
conv_codes = new int[num_rows*num_cols];
conv_min_temp_index = new int[num_rows*num_cols];
conv_direction = new int[num_rows*num_cols];
conv_vel_index = new int[num_rows*num_cols];
conv_fluid_index = new int[num_rows*num_cols];
conv_rock_index = new int[num_rows*num_cols];
//Allocates convection specific variables in device memory
error = cudaMalloc((void **) &dev_conv_codes,num_cols*num_rows*num_slices*sizeof(int));
error = cudaMalloc((void **) &dev_conv_min_temp_index,num_cols*num_rows*num_slices*sizeof(int));
error = cudaMalloc((void **) &dev_conv_direction,num_cols*num_rows*num_slices*sizeof(int));
error = cudaMalloc((void **) &dev_conv_vel_index,num_cols*num_rows*num_slices*sizeof(int));
error = cudaMalloc((void **) &dev_conv_fluid_index,num_cols*num_rows*num_slices*sizeof(int));
error = cudaMalloc((void **) &dev_conv_rock_index,num_cols*num_rows*num_slices*sizeof(int));
if(error != cudaSuccess) {
cerr << "Unable to allocate device memory for convection" << endl;
exit(1);
}
//Reads in the convection codes for each cell of the simulation and parses the array
//indexes from the codes
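//Illustrative example (assumed code value): "0102030405" parses as conv_min_temp_index = 0,
//conv_vel_index = 1, conv_fluid_index = 2, conv_rock_index = 3 and conv_direction = 5.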
for (int k = 0; k < num_slices; k++) {
for(int i = 0; i < num_rows; i++) {
for(int j = 0; j < num_cols; j++) {
source_file >> temp_str;
conv_codes[i*num_cols + j] = atoi(temp_str.c_str());
conv_min_temp_index[i*num_cols + j] = atoi(temp_str.substr(0*INDEX_WIDTH,INDEX_WIDTH).c_str())-1;
conv_vel_index[i*num_cols + j] = atoi(temp_str.substr(1*INDEX_WIDTH,INDEX_WIDTH).c_str())-1;
conv_fluid_index[i*num_cols + j] = atoi(temp_str.substr(2*INDEX_WIDTH,INDEX_WIDTH).c_str())-1;
conv_rock_index[i*num_cols + j] = atoi(temp_str.substr(3*INDEX_WIDTH,INDEX_WIDTH).c_str())-1;
conv_direction[i*num_cols + j] = atoi(temp_str.substr(4*INDEX_WIDTH,2).c_str());
}
}
//Copies the current convection code slice to device memory
error = cudaMemcpy(&dev_conv_codes[k*num_cols*num_rows],conv_codes,num_rows*num_cols*sizeof(int),cudaMemcpyHostToDevice);
error = cudaMemcpy(&dev_conv_min_temp_index[k*num_cols*num_rows],conv_min_temp_index,num_rows*num_cols*sizeof(int),cudaMemcpyHostToDevice);
error = cudaMemcpy(&dev_conv_direction[k*num_cols*num_rows],conv_direction,num_rows*num_cols*sizeof(int),cudaMemcpyHostToDevice);
error = cudaMemcpy(&dev_conv_vel_index[k*num_cols*num_rows],conv_vel_index,num_rows*num_cols*sizeof(int),cudaMemcpyHostToDevice);
error = cudaMemcpy(&dev_conv_fluid_index[k*num_cols*num_rows],conv_fluid_index,num_rows*num_cols*sizeof(int),cudaMemcpyHostToDevice);
error = cudaMemcpy(&dev_conv_rock_index[k*num_cols*num_rows],conv_rock_index,num_rows*num_cols*sizeof(int),cudaMemcpyHostToDevice);
if(error != cudaSuccess) {
cerr << "Unable to copy convection codes to device memory" << endl;
exit(1);
}
}
cout << "Read " << num_rows << " X " << num_cols << " X " << num_slices << " convection codes" << endl;
}
//Reads in the X (column) dimensions and finds the minimum column dimension
for(int i = 0; i < num_cols; i++) {
source_file >> dim_x[i];
if(i == 0) {
min_col_dim = dim_x[0];
dist_x[0] = dim_x[0]/2.0;
}
else {
if(dim_x[i] < min_col_dim) {
min_col_dim = dim_x[i];
}
dist_x[i] = dist_x[i-1] + dim_x[i-1]/2.0 + dim_x[i]/2.0;
}
}
max_dist_x = dist_x[num_cols-1] + dim_x[num_cols-1]/2.0;
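//dist_x holds the x coordinate of each cell center, accumulated from half-widths.
//Worked example with hypothetical widths dim_x = {10, 20, 30}:
//  dist_x = {5, 20, 45} and max_dist_x = 45 + 30/2 = 60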
//Copies the x dimensions and distances to device memory
error = cudaMemcpy(dev_dim_x,dim_x,num_cols*sizeof(REAL),cudaMemcpyHostToDevice);
if(error != cudaSuccess) {
cerr << "Unable to copy x dimensions to device" << endl;
exit(1);
}
error = cudaMemcpy(dev_dist_x,dist_x,num_cols*sizeof(REAL),cudaMemcpyHostToDevice);
if(error != cudaSuccess) {
cerr << "Unable to copy x dimensions to device" << endl;
exit(1);
}
//Reads in the Y (row) dimensions and finds the minimum row dimension
for(int i = 0; i < num_rows; i++) {
source_file >> dim_y[i];
if(i == 0) {
min_row_dim = dim_y[i];
dist_y[0] = dim_y[0]/2.0;
}
else {
if(dim_y[i] < min_row_dim) {
min_row_dim = dim_y[i];
}
dist_y[i] = dist_y[i-1] + dim_y[i-1]/2.0 + dim_y[i]/2.0;
}
}
max_dist_y = dist_y[num_rows-1] + dim_y[num_rows-1]/2.0;
//Copies the y dimensions and distances to device memory
error = cudaMemcpy(dev_dim_y,dim_y,num_rows*sizeof(REAL),cudaMemcpyHostToDevice);
if(error != cudaSuccess) {
cerr << "Unable to copy y dimensions to device" << endl;
exit(1);
}
error = cudaMemcpy(dev_dist_y,dist_y,num_rows*sizeof(REAL),cudaMemcpyHostToDevice);
if(error != cudaSuccess) {
cerr << "Unable to copy y dimensions to device" << endl;
exit(1);
}
//Reads in the Z (slice depth) dimensions and finds the minimum slice dimension
for (int i = 0; i < num_slices; i++) {
source_file >> dim_z[i];
if (i == 0) {
min_slice_dim = dim_z[i];
dist_z[0] = dim_z[0]/2.0;
}
else {
if (dim_z[i] < min_slice_dim) {
min_slice_dim = dim_z[i];
}
dist_z[i] = dist_z[i-1] + dim_z[i-1]/2.0 + dim_z[i]/2.0;
}
}
max_dist_z = dist_z[num_slices-1] + dim_z[num_slices-1]/2.0;
//Copies the z dimensions and distances to device memory
error = cudaMemcpy(dev_dim_z,dim_z,num_slices*sizeof(REAL),cudaMemcpyHostToDevice);
if(error != cudaSuccess) {
cerr << "Unable to copy y dimensions to device" << endl;
exit(1);
}
error = cudaMemcpy(dev_dist_z,dist_z,num_slices*sizeof(REAL),cudaMemcpyHostToDevice);
if(error != cudaSuccess) {
cerr << "Unable to copy y dimensions to device" << endl;
exit(1);
}
//Reads in the conduction heat production values
source_file >> num_hp;
heat_production_values = new REAL[num_hp];
for(int i = 0; i < num_hp; i++) {
source_file >> heat_production_values[i];
heat_production_values[i] /= 1E6;
}
//Allocates and copies the heat production values to device memory
error = cudaMalloc((void **) &dev_heat_production_values,num_hp*sizeof(REAL));
error = cudaMemcpy(dev_heat_production_values,heat_production_values,num_hp*sizeof(REAL),cudaMemcpyHostToDevice);
if(error != cudaSuccess) {
cerr << "Unable to copy heat production values to device" << endl;
exit(1);
}
cout << "Read "<< num_hp << " heat production values" << endl;
//Reads in the thermal conduction difference values
//Finds the minimum and maximum thermal conductivity differences and
//performs some scaling of the conduction associated variables
source_file >> num_tcd;
thermal_conduct_diff = new REAL[num_tcd];
cout << "Converted " << num_tcd << " Thermal Conductivities to Diff. in m^2/y" << endl;
for(int i = 0; i < num_tcd; i++) {
source_file >> thermal_conduct_diff[i];
thermal_conduct_diff[i] *= 14.33;
if(i == 0) {
max_thermal_conduct_diff = thermal_conduct_diff[0];
min_thermal_conduct_diff = thermal_conduct_diff[0];
}
else {
if(thermal_conduct_diff[i] > max_thermal_conduct_diff) {
max_thermal_conduct_diff = thermal_conduct_diff[i];
}
if(thermal_conduct_diff[i] < min_thermal_conduct_diff) {
min_thermal_conduct_diff = thermal_conduct_diff[i];
}
}
cout << " " << thermal_conduct_diff[i];
}
//Allocates and copies the thermal conductivity difference values to device memory
error = cudaMalloc((void **) &dev_thermal_conduct_diff,num_tcd*sizeof(REAL));
error = cudaMemcpy(dev_thermal_conduct_diff,thermal_conduct_diff,num_tcd*sizeof(REAL),cudaMemcpyHostToDevice);
if(error != cudaSuccess) {
cerr << "Unable to copy thermal conductivities to device" << endl;
exit(1);
}
//Reads in the convection specific variables if convection
//is used by the user specified input file
if(using_convection) {
//Reads in the fluid heat capacity values
source_file >> num_hcf;
heat_capac_fluid = new REAL[num_hcf];
for(int i = 0; i < num_hcf; i++) {
source_file >> heat_capac_fluid[i];
}
//Allocates and copies the fluid heat capacity values to device memory
error = cudaMalloc((void **) &dev_heat_capac_fluid,num_hcf*sizeof(REAL));
error = cudaMemcpy(dev_heat_capac_fluid,heat_capac_fluid,num_hcf*sizeof(REAL),cudaMemcpyHostToDevice);
if(error != cudaSuccess) {
cerr << "Unable to copy fluid heat capacity to device" << endl;
exit(1);
}
//Reads in the rock heat capacity values
source_file >> num_hcr;
heat_capac_rock = new REAL[num_hcr];
for(int i = 0; i < num_hcr; i++) {
source_file >> heat_capac_rock[i];
}
//Allocates and copies the rock heat capacity values to device memory
error = cudaMalloc((void **) &dev_heat_capac_rock,num_hcr*sizeof(REAL));
error = cudaMemcpy(dev_heat_capac_rock,heat_capac_rock,num_hcr*sizeof(REAL),cudaMemcpyHostToDevice);
if(error != cudaSuccess) {
cerr << "Unable to copy rock heat capacity to device" << endl;
exit(1);
}
//Reads in the minimum convection temperatures
source_file >> num_mtc;
min_temp_conv = new REAL[num_mtc];
for(int i = 0; i < num_mtc; i++) {
source_file >> min_temp_conv[i];
}
//Allocates and copies the minimum convection temperature values to device memory
error = cudaMalloc((void **) &dev_min_temp_conv,num_mtc*sizeof(REAL));
error = cudaMemcpy(dev_min_temp_conv,min_temp_conv,num_mtc*sizeof(REAL),cudaMemcpyHostToDevice);
if(error != cudaSuccess) {
cerr << "Unable to copy minimum temp for convection to device" << endl;
exit(1);
}
//Reads in the convection velocities
source_file >> num_vel;
vel = new REAL[num_vel];
for(int i = 0; i < num_vel; i++) {
source_file >> vel[i];
}
cout << endl << "Read " << num_vel << " Velocities in m/yr" << endl;
//Allocates and copies the convection velocities to device memory
error = cudaMalloc((void **) &dev_vel,num_vel*sizeof(REAL));
error = cudaMemcpy(dev_vel,vel,num_vel*sizeof(REAL),cudaMemcpyHostToDevice);
if(error != cudaSuccess) {
cerr << "Unable to copy convection velocities to device" << endl;
exit(1);
}
//Finds the maximum convection velocity
max_vel = vel[0];
cout << " " << vel[0];
for(int i = 1 ; i < num_vel; i++) {
if(vel[i] > max_vel) {
max_vel = vel[i];
}
cout << " " << vel[i];
}
cout << endl;
}
//Closes the input file
source_file.close();
/*
T1 = max_thermal_conduct_diff;
T2 = min_col_dim;
T3 = min_row_dim;
*/
//Finds the convection time increment
if(using_convection) {
if(min_col_dim > min_row_dim) {
tic = min_row_dim/max_vel;
}
else {
tic = min_col_dim/max_vel;
}
}
//Calculates the maximum time step of the simulation
if(min_col_dim < min_row_dim) {
time_step = min_col_dim*min_col_dim/(5*max_thermal_conduct_diff);
}
else {
time_step = min_row_dim*min_row_dim/(5*max_thermal_conduct_diff);
}
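//The divisor of 5 keeps the explicit scheme well inside its stability limit:
//the classic one-dimensional criterion is time_step <= dx*dx/(2*diffusivity),
//so using dx*dx/(5*diffusivity) leaves margin for the 2-D/3-D stencil.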
cout << endl << "Done Loading Input File" << endl;
}
/**
* Saves the current state of the simulation, using the same format
* as the input file
*/
void save_model_state() {
ofstream output_file; //Output file stream
//Opens the output file for writing
output_file.open(output_filename.c_str(),ios::out);
if(!output_file.is_open()) {
cerr << "Failed to write state to file" << endl;
exit(1);
}
else {
//Prints the simulation parameters to the output file
output_file << setw(20) << num_rows << " " << setw(20) << num_cols << " " << setw(20) << num_slices << setw(20) << using_convection << endl;
output_file << setw(20) << fixed << setprecision(OUT_PRECISION) << chf*1000.0 << " " << setw(20) << initial_time + sim_time << endl;
output_file << title << endl;
output_file << setprecision(OUT_PRECISION);
//Prints the current temperature array of the simulation
for (int k = 0; k < num_slices; k++) {
//Copies the current temperature slice into host memory
error = cudaMemcpy(temp,&dev_temp[k*num_rows*num_cols],num_rows*num_cols*sizeof(REAL),cudaMemcpyDeviceToHost);
if(error != cudaSuccess) {
cerr << "Unable to copy convection velocities to device" << endl;
exit(1);
}
for(int i = 0; i < num_rows; i++) {
for(int j = 0; j < num_cols; j++) {
output_file << " " << setw(OUT_PRECISION+5) << temp[i*num_cols + j];
}
output_file << endl;
}
output_file << endl;
}
//Prints the conduction codes of the simulation to the output file
output_file << setfill('0');
for (int k = 0; k < num_slices; k++) {
//Copies the current conduction code slice into host memory
error = cudaMemcpy(cond_codes,&dev_cond_codes[k*num_rows*num_cols],num_rows*num_cols*sizeof(int),cudaMemcpyDeviceToHost);
if(error != cudaSuccess) {
cerr << "Unable to copy convection velocities to device" << endl;
exit(1);
}
for(int i = 0; i < num_rows; i++) {
for(int j = 0; j < num_cols; j++) {
output_file << " " << setw(2*INDEX_WIDTH+1) << cond_codes[i*num_cols + j];
}
output_file << endl;
}
output_file << endl;
}
//Prints the convection codes to the output file if convection is being used
if(using_convection) {
for (int k = 0; k < num_slices; k++) {
//Copies the current convection code slice into host memory
error = cudaMemcpy(conv_codes,&dev_conv_codes[k*num_rows*num_cols],num_rows*num_cols*sizeof(int),cudaMemcpyDeviceToHost);
if(error != cudaSuccess) {
cerr << "Unable to copy convection velocities to device" << endl;
exit(1);
}
for(int i = 0; i < num_rows; i++) {
for(int j = 0; j < num_cols; j++) {
output_file << " " << setw(4*INDEX_WIDTH+2) << conv_codes[i*num_cols + j];
}
output_file << endl;
}
output_file << endl;
}
}
output_file << setfill(' ');
output_file << setprecision(3);
//Prints the column (X) dimensions of the simulation to the output file
for(int i = 0; i < num_cols; i++) {
output_file << " " << dim_x[i];
}
output_file << endl;
//Prints the row (Y) dimensions of the simulation to the output file
for(int i = 0; i < num_rows; i++) {
output_file << " " << dim_y[i];
}
output_file << endl;
// Prints the slice (Z) dimensions of the simulation to the output file
for (int i = 0; i < num_slices; i++) {
output_file << " " << dim_z[i];
}
output_file << endl;
//Prints the heat production values of the simulation to the output file
output_file << " " << num_hp;
for(int i = 0; i < num_hp; i++) {
output_file << " " << scientific << heat_production_values[i]*1E6;
}
output_file << endl;
//Prints the thermal conductivity difference values to the output file
output_file << " " << num_tcd;
for(int i = 0; i < num_tcd; i++) {
output_file << " " << thermal_conduct_diff[i]/14.33;
}
output_file << endl;
//Prints the convection specific variables to the output file if convection is used
if(using_convection) {
//Prints the fluid heat capacity values to the output file
output_file << " " << num_hcf;
for(int i = 0; i < num_hcf; i++) {
output_file << " " << heat_capac_fluid[i];
}
output_file << endl;
//Prints the rock heat capacity values to the output file
output_file << " " << num_hcr;
for(int i = 0; i < num_hcr; i++) {
output_file << " " << heat_capac_rock[i];
}
output_file << endl;
//Prints the minimum convection temps to the output file
output_file << " " << num_mtc;
for(int i = 0; i < num_mtc; i++) {
output_file << " " << min_temp_conv[i];
}
output_file << endl;
//Prints the convection velocities to the output file
output_file << " " << num_vel;
for(int i = 0; i < num_vel; i++) {
output_file << " " << vel[i];
}
output_file << endl;
}
//Closes the output file
output_file.close();
}
}
/**
* Saves the current temperatures of the simulation to a DSAA surfer grid file
*/
void save_surfer() {
ofstream output_file; //Output file stream
ostringstream oss;
string filename, extension;
filename = output_su_filename.substr(0,output_su_filename.length()-4);
extension = output_su_filename.substr(output_su_filename.length()-4,4);
for(int k = 0; k < num_slices; k++) {
oss.str("");
oss.clear();
oss << filename << setfill('0') << setw(su_num_width) << k << extension;
//Opens the output file for writing
output_file.open(oss.str().c_str(),ios::out);
if(!output_file.is_open()) {
cerr << "Failed to write surfer file" << endl;
exit(1);
}
else {
REAL min_temp, max_temp, temp_range; //Minimum and maximum temperatures.
REAL xmax,ymin; //Maximum x and minimum y distances
//Copies the current temperature slice to host memory
error = cudaMemcpy(temp,&dev_temp[k*num_rows*num_cols],num_rows*num_cols*sizeof(REAL),cudaMemcpyDeviceToHost);
if(error != cudaSuccess) {
cerr << "Unable to copy convection velocities to device" << endl;
exit(1);
}
//Finds the minimum and maximum temps in the temperature array
min_temp = max_temp = temp[0];
for(int i = 0; i < num_rows; i++) {
for(int j = 0; j < num_cols; j++) {
if(temp[i*num_cols + j] > max_temp) {
max_temp = temp[i*num_cols + j];
}
if(temp[i*num_cols + j] < min_temp) {
min_temp = temp[i*num_cols + j];
}
}
}
//Calculates the temperature range.
temp_range = max_temp - min_temp;
if(temp_range == 0) {
temp_range = 1.0;
}
//Calculates the maximum x distance and the
//minimum y distance
xmax = dim_x[0]*num_cols;
ymin = dim_y[0]*num_rows;
if(dim_x[0] < 0.01) {
xmax *= 1000;
}
else if(dim_x[0] < 0.1) {
xmax *= 100;
}
else if(dim_x[0] < 1) {
xmax *= 10;
}
if(dim_y[0] < 0.01) {
ymin *= 1000;
}
else if(dim_y[0] < 0.1) {
ymin *= 100;
}
else if(dim_y[0] < 1) {
ymin *= 10;
}
//Prints the DSAA surfer grid parameters to the output file
output_file << "DSAA" << endl;
output_file << setw(20) << num_cols << " " << setw(20) << num_rows << endl;
output_file << fixed << setprecision(3) << setw(20) << 0.0 << " " << setw(20) << xmax << endl;
output_file << setw(20) << -ymin << " " << setw(20) << 0.0 << endl;
output_file << setw(20) << setprecision(OUT_PRECISION) << min_temp << " " << setw(20) << max_temp << endl;
//Prints the temperature array to the output file
for(int i = num_rows-1; i >= 0; i--) {
for(int j = 0; j < num_cols; j++) {
output_file << " " << setw(OUT_PRECISION+5) << temp[i*num_cols + j];
}
output_file << endl;
}
//Closes the output file
output_file.close();
}
}
}
/**
* Calculates and returns the heat flow per year between two cells in the X direction
* based on the provided indexes
*/
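//The returned value is the finite-difference expression
//  2*(T2 - T1) / ((dx2/alpha2 + dx1/alpha1) * dx1)
//where dx is the cell width and alpha the thermal diffusivity; the dx/alpha terms act
//as series resistances between cell centers, and the result is later multiplied by
//time_step in the conduction kernel.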
__device__ REAL cond_add_x(int row1, int col1, int slice1, int row2, int col2, int slice2, int num_rows, int num_cols, int num_slices, REAL *dim_x, REAL *dim_y, REAL *dim_z, REAL *temp, REAL *next_temp, REAL *thermal_conduct_diff, int *cond_tc_index) {
REAL temp_diff; //Temperature difference between the two cells
REAL ad; //Sum of cell width divided by diffusivity for the two cells (series resistance term)
temp_diff = temp[slice1*num_rows*num_cols + row1*num_cols + col2] - temp[slice1*num_rows*num_cols + row1*num_cols + col1];
ad = dim_x[col2]/thermal_conduct_diff[cond_tc_index[slice1*num_rows*num_cols + row1*num_cols + col2]] + dim_x[col1]/thermal_conduct_diff[cond_tc_index[slice1*num_rows*num_cols + row1*num_cols + col1]];
return 2*temp_diff/(ad*dim_x[col1]);
}
/**
* Calculates and returns the heat flow per year between two cells in the Y direction
* based on the provided indexes
*/
__device__ REAL cond_add_y(int row1, int col1, int slice1, int row2, int col2, int slice2, int num_rows, int num_cols, int num_slices, REAL *dim_x, REAL *dim_y, REAL *dim_z, REAL *temp, REAL *next_temp, REAL *thermal_conduct_diff, int *cond_tc_index) {
REAL temp_diff; //Temperature difference between the two cells
REAL ad; //Sum of cell height divided by diffusivity for the two cells (series resistance term)
temp_diff = temp[slice1*num_rows*num_cols + row2*num_cols + col1] - temp[slice1*num_rows*num_cols + row1*num_cols + col1];
ad = dim_y[row2]/thermal_conduct_diff[cond_tc_index[slice1*num_rows*num_cols + row2*num_cols + col1]] + dim_y[row1]/thermal_conduct_diff[cond_tc_index[slice1*num_rows*num_cols + row1*num_cols + col1]];
return 2*temp_diff/(ad*dim_y[row1]);
}
/**
* Calculates and returns the heat flow per year between two cells in the Z direction
* based on the provided indexes
*/
__device__ REAL cond_add_z(int row1, int col1, int slice1, int row2, int col2, int slice2, int num_rows, int num_cols, int num_slices, REAL *dim_x, REAL *dim_y, REAL *dim_z, REAL *temp, REAL *next_temp, REAL *thermal_conduct_diff, int *cond_tc_index) {
if(num_slices == 1) {
return 0.0;
}
REAL temp_diff; //Temperature difference between the two cells
REAL ad; //Sum of cell thickness divided by diffusivity for the two cells (series resistance term)
temp_diff = temp[slice2*num_rows*num_cols + row1*num_cols + col1] - temp[slice1*num_rows*num_cols + row1*num_cols + col1];
ad = dim_z[slice2]/thermal_conduct_diff[cond_tc_index[slice2*num_rows*num_cols + row1*num_cols + col1]] + dim_z[slice1]/thermal_conduct_diff[cond_tc_index[slice1*num_rows*num_cols + row1*num_cols + col1]];
return 2*temp_diff/(ad*dim_z[slice1]);
}
/**
 * Calculates the in-plane (X and Y direction) conduction heat flow for cell (i,j)
 * of slice k, handling edge and corner cells explicitly.
 * Side and top boundary cells contribute no external heat flow, while cells on the
 * bottom row receive a constant basal heat flow (DHF/dim_y); this boundary term is
 * written into next_temp and the conductive flow from the in-plane neighbors is returned.
 */
__device__ REAL in_plane_cond(int i, int j, int k, int num_rows, int num_cols, int num_slices, REAL *dim_x, REAL *dim_y, REAL *dim_z, REAL DHF, REAL *temp, REAL *next_temp, REAL *thermal_conduct_diff, int *cond_tc_index) {
REAL heat_flow_x;
REAL heat_flow_y;
/* k is fixed */
if(i == 0 && j == 0) { //Top left corner of slice
heat_flow_x = cond_add_x(i,j,k,i,j+1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
heat_flow_y = cond_add_y(i,j,k,i+1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
next_temp[k*num_rows*num_cols + i*num_cols + j] = 0.0;
}
else if(i == 0 && j == num_cols-1) { //Top right corner of slice
heat_flow_x = cond_add_x(i,j,k,i,j-1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
heat_flow_y = cond_add_y(i,j,k,i+1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
next_temp[k*num_rows*num_cols + i*num_cols + j] = 0.0;
}
else if(i == 0) { //Top of slice
heat_flow_x = cond_add_x(i,j,k,i,j+1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index) + cond_add_x(i,j,k,i,j-1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
heat_flow_y = cond_add_y(i,j,k,i+1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
next_temp[k*num_rows*num_cols + i*num_cols + j] = 0.0;
}
else if(i == num_rows-1 && j == 0) { //Bottom left corner of slice
heat_flow_x = cond_add_x(i,j,k,i,j+1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
heat_flow_y = cond_add_y(i,j,k,i-1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
next_temp[k*num_rows*num_cols + i*num_cols + j] = DHF/dim_y[i]; //Constant heat flow at the bottom of the model
}
else if(i == num_rows-1 && j == num_cols-1) { //Bottom right corner of slice
heat_flow_x = cond_add_x(i,j,k,i,j-1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
heat_flow_y = cond_add_y(i,j,k,i-1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
next_temp[k*num_rows*num_cols + i*num_cols + j] = DHF/dim_y[i]; //Constant heat flow at the bottom of the model
}
else if(i == num_rows-1) { //Bottom
heat_flow_x = cond_add_x(i,j,k,i,j+1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index) + cond_add_x(i,j,k,i,j-1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
heat_flow_y = cond_add_y(i,j,k,i-1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
next_temp[k*num_rows*num_cols + i*num_cols + j] = DHF/dim_y[i]; //Constant heat flow at the bottom of the model
}
else if(j == 0) { //Left side of slice
heat_flow_x = cond_add_x(i,j,k,i,j+1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
heat_flow_y = cond_add_y(i,j,k,i-1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index) + cond_add_y(i,j,k,i+1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
next_temp[k*num_rows*num_cols + i*num_cols + j] = 0.0;
}
else if(j == num_cols-1) { //Right side of slice
heat_flow_x = cond_add_x(i,j,k,i,j-1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
heat_flow_y = cond_add_y(i,j,k,i-1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index) + cond_add_y(i,j,k,i+1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
next_temp[k*num_rows*num_cols + i*num_cols + j] = 0.0;
}
else { //Middle of slice
heat_flow_x = cond_add_x(i,j,k,i,j-1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index) + cond_add_x(i,j,k,i,j+1,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
heat_flow_y = cond_add_y(i,j,k,i-1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index) + cond_add_y(i,j,k,i+1,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index);
next_temp[k*num_rows*num_cols + i*num_cols + j] = 0.0;
}
return (heat_flow_x + heat_flow_y);
}
/**
* Conduction Kernel
* Updates the temperature array using 3D conduction with finite
* difference heat flow.
*/
__global__ void conduction_kernel(int num_cells, int num_rows, int num_cols, int num_slices, REAL *dim_x, REAL *dim_y, REAL *dim_z, REAL DHF, REAL time_step, REAL *temp, REAL *next_temp, int *use_cond, REAL *heat_production_values, int *cond_hp_index, REAL *thermal_conduct_diff, int *cond_tc_index){
unsigned long long id = blockIdx.x*blockDim.x+threadIdx.x; //Thread ID
if(id < num_cells) {
int k = id/(num_rows*num_cols);
int i = (id- k*num_rows*num_cols)/num_cols;
int j = id - k*num_rows*num_cols - i*num_cols;
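//Worked example with hypothetical sizes: for num_rows = 3, num_cols = 4 and id = 17,
//k = 17/12 = 1, i = (17 - 12)/4 = 1, j = 17 - 12 - 4 = 1, i.e. cell (1,1) of slice 1.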
if(use_cond[k*num_rows*num_cols + i*num_cols + j] == 1) {
REAL heatflow_in_plane; //Heat flow occurring inside the plane
REAL heatflow_cross_plane; //Heat flow into and out of plane/slice
if (k == 0) { // First slice
heatflow_in_plane = in_plane_cond(i,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,DHF,temp,next_temp,thermal_conduct_diff,cond_tc_index); // heat transfer inside of plane
heatflow_cross_plane = cond_add_z(i,j,k,i,j,k+1,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index); // slice-to-slice heat transfer. first slice, so only from next slice transfers heat.
}
else if (k == num_slices - 1) { // Last slice
heatflow_in_plane = in_plane_cond(i,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,DHF,temp,next_temp,thermal_conduct_diff,cond_tc_index); // heat transfer inside of plane
heatflow_cross_plane = cond_add_z(i,j,k,i,j,k-1,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index); // slice-to-slice heat transfer. last slice, so only previous slice transfers heat.
}
else { // Middle
heatflow_in_plane = in_plane_cond(i,j,k,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,DHF,temp,next_temp,thermal_conduct_diff,cond_tc_index); // heat transfer inside of plane
heatflow_cross_plane = cond_add_z(i,j,k,i,j,k+1,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index) + cond_add_z(i,j,k,i,j,k-1,num_rows,num_cols,num_slices,dim_x,dim_y,dim_z,temp,next_temp,thermal_conduct_diff,cond_tc_index); // slice-to-slice heat transfer. Middle, so both next and previous.
}
//Heat flow from the adjacent cells
next_temp[k*num_rows*num_cols + i*num_cols + j] += temp[k*num_rows*num_cols + i*num_cols + j] + time_step*(heatflow_in_plane + heatflow_cross_plane);
//Heat flow due to radioactive heat production
next_temp[k*num_rows*num_cols + i*num_cols + j] += heat_production_values[cond_hp_index[k*num_rows*num_cols + i*num_cols + j]]*time_step/DTC;
}
else {
next_temp[k*num_rows*num_cols + i*num_cols + j] = temp[k*num_rows*num_cols + i*num_cols + j];
}
}
}
/**
* Wrapper Function for the conduction kernel
*/
void conduction_cuda() {
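//dimGrid and dimBlock are defined elsewhere in the program; this wrapper assumes a
//one-dimensional launch configuration sized so that gridDim.x*blockDim.x >= num_cells.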
//Calls the conduction kernel
conduction_kernel<<<dimGrid,dimBlock>>>(num_cells,num_rows,num_cols,num_slices,dev_dim_x,dev_dim_y,dev_dim_z,DHF,time_step,dev_temp,dev_next_temp,dev_use_cond,dev_heat_production_values,dev_cond_hp_index,dev_thermal_conduct_diff,dev_cond_tc_index);
//Waits for the kernel to finish executing
cudaDeviceSynchronize();
//Checks if an error occured during execution of the kernel
error = cudaGetLastError();
if(error != cudaSuccess) {
cerr << "Error while executing conduction kernel" << endl;
cerr << error << " : " << cudaGetErrorString(error) << endl;
cerr << "Exiting the Program" << endl;
exit(1);
}
//Swaps the device temperature arrays
swap_temp_array_cuda();
}
/**
* Performs convection between two specified cells
*/
__device__ void perform_convection(int row1, int col1, int slice1, int row2, int col2, int slice2, int num_rows, int num_cols, int num_slices, REAL *dist_x, REAL *dist_y, REAL *dist_z, REAL time_inc, REAL *temp, REAL *next_temp, REAL *min_temp_conv, int *conv_min_temp_index, REAL *heat_capac_fluid, int *conv_fluid_index, REAL *heat_capac_rock, int *conv_rock_index, REAL *vel, int *conv_vel_index) {
REAL avg_x_dim; //distance between two temperature cells in the x direction
REAL avg_y_dim; //distance between two temperature cells in the y direction
REAL avg_z_dim; //distance between two temperature cells in the z direction
REAL amt; //Advection distance covered in one convection time increment, scaled by the fluid/rock heat capacity ratio
REAL dist; //Distance between the two cells
REAL ratio; //Ratio of amt to distance
//Checks if the specified cell is within the bounds of the simulation and if it has a high enough
//temperature to perform convection
if((row2 >= 0) && (row2 < num_rows) && (col2 >= 0) && (col2 < num_cols) && (slice2 >= 0) && (slice2 < num_slices) && (temp[slice2*num_rows*num_cols + row2*num_cols + col2] - min_temp_conv[conv_min_temp_index[slice1*num_rows*num_cols + row1*num_cols + col1]] >= 0)) {
avg_x_dim = dist_x[col1] - dist_x[col2];
avg_y_dim = dist_y[row1] - dist_y[row2];
avg_z_dim = dist_z[slice1] - dist_z[slice2];
amt = (vel[conv_vel_index[slice1*num_rows*num_cols + row1*num_cols + col1]]*heat_capac_fluid[conv_fluid_index[slice1*num_rows*num_cols + row1*num_cols + col1]]/heat_capac_rock[conv_rock_index[slice1*num_rows*num_cols + row1*num_cols + col1]])*time_inc;
dist = sqrt(avg_x_dim*avg_x_dim + avg_y_dim*avg_y_dim + avg_z_dim*avg_z_dim);
ratio = amt/dist;
if(ratio > 1) {
ratio = 0.999999;
}
next_temp[slice1*num_rows*num_cols + row1*num_cols + col1] = temp[slice1*num_rows*num_cols + row1*num_cols + col1] + ratio *(temp[slice2*num_rows*num_cols + row2*num_cols + col2]-temp[slice1*num_rows*num_cols + row1*num_cols + col1]);
}
else {
next_temp[slice1*num_rows*num_cols + row1*num_cols + col1] = temp[slice1*num_rows*num_cols + row1*num_cols + col1];
}
}
/**
* Convection Kernel
* Updates the temperature array using convection
*/
__global__ void convection_kernel(unsigned long long num_cells, int num_rows, int num_cols, int num_slices, REAL *dist_x, REAL *dist_y, REAL *dist_z, REAL time_inc, REAL *temp, REAL *next_temp, int *conv_codes, int *conv_direction, REAL *min_temp_conv, int *conv_min_temp_index, REAL *heat_capac_fluid, int *conv_fluid_index, REAL *heat_capac_rock, int *conv_rock_index, REAL *vel, int *conv_vel_index) {
unsigned long long id = blockIdx.x*blockDim.x+threadIdx.x; //Thread ID
if(id < num_cells) {
int k = id/(num_rows*num_cols);
int i = (id- k*num_rows*num_cols)/num_cols;
int j = id - k*num_rows*num_cols - i*num_cols;
//Checks if convection can occur for the specified cell
if((conv_codes[k*num_rows*num_cols + i*num_cols + j] <= 0) || (i == 0) || (conv_direction[k*num_rows*num_cols + i*num_cols + j] == 5) || (conv_direction[k*num_rows*num_cols + i*num_cols + j] < 1) || (conv_direction[k*num_rows*num_cols + i*num_cols + j] > 27)) {
next_temp[k*num_rows*num_cols + i*num_cols + j] = temp[k*num_rows*num_cols + i*num_cols + j];
}
else {
//Performs convection based on the convection direction code
switch(conv_direction[k*num_rows*num_cols + i*num_cols + j]) {
/**
* IN-PLANE convection -- 1 through 9. These codes are for convection taking place in the current, "k-th" plane
* 1 2 3
* 4 5 6
* 7 8 9
*/
case 1:
perform_convection(i,j,k,i-1,j-1,k,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 2:
perform_convection(i,j,k,i-1,j,k,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 3:
perform_convection(i,j,k,i-1,j+1,k,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 4:
perform_convection(i,j,k,i,j-1,k,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 6:
perform_convection(i,j,k,i,j+1,k,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 7:
perform_convection(i,j,k,i+1,j-1,k,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 8:
perform_convection(i,j,k,i+1,j,k,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 9:
perform_convection(i,j,k,i+1,j+1,k,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
/**
* CROSS-PLANE convection (previous "k-1th" plane) -- 10 through 18
* 10 11 12
* 13 14 15
* 16 17 18
*/
case 10:
perform_convection(i,j,k,i-1,j-1,k-1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 11:
perform_convection(i,j,k,i-1,j,k-1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 12:
perform_convection(i,j,k,i-1,j+1,k-1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 13:
perform_convection(i,j,k,i,j-1,k-1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 14:
perform_convection(i,j,k,i,j,k-1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 15:
perform_convection(i,j,k,i,j+1,k-1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 16:
perform_convection(i,j,k,i+1,j-1,k-1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 17:
perform_convection(i,j,k,i+1,j,k-1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 18:
perform_convection(i,j,k,i+1,j+1,k-1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
/**
* CROSS-PLANE convection ("k+1th" plane) -- 19 through 27
* 19 20 21
* 22 23 24
* 25 26 27
*/
case 19:
perform_convection(i,j,k,i-1,j-1,k+1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 20:
perform_convection(i,j,k,i-1,j,k+1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 21:
perform_convection(i,j,k,i-1,j+1,k+1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 22:
perform_convection(i,j,k,i,j-1,k+1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 23:
perform_convection(i,j,k,i,j,k+1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 24:
perform_convection(i,j,k,i,j+1,k+1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 25:
perform_convection(i,j,k,i+1,j-1,k+1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 26:
perform_convection(i,j,k,i+1,j,k+1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
case 27:
perform_convection(i,j,k,i+1,j+1,k+1,num_rows,num_cols,num_slices,dist_x,dist_y,dist_z,time_inc,temp,next_temp,min_temp_conv,conv_min_temp_index,heat_capac_fluid,conv_fluid_index,heat_capac_rock,conv_rock_index,vel,conv_vel_index);
break;
}
}
}
}
/**
* Wrapper function for the convection kernel
*/
void convection_cuda() {
for(int i = 0; i < num_conv_loops; i++) {
//Calls the convection kernel
convection_kernel<<<dimGrid,dimBlock>>>(num_cells,num_rows,num_cols,num_slices,dev_dist_x,dev_dist_y,dev_dist_z,time_inc,dev_temp,dev_next_temp,dev_conv_codes,dev_conv_direction,dev_min_temp_conv,dev_conv_min_temp_index,dev_heat_capac_fluid,dev_conv_fluid_index,dev_heat_capac_rock,dev_conv_rock_index,dev_vel,dev_conv_vel_index);
//Waits for the kernel to finish executing
cudaDeviceSynchronize();
//Checks if an error occured during execution
error = cudaGetLastError();
if(error != cudaSuccess) {
cerr << "Error while executing convection kernel" << endl;
cerr << error << " : " << cudaGetErrorString(error) << endl;
cerr << "Exiting the Program" << endl;
exit(1);
}
//Swaps the device temperature arrays
swap_temp_array_cuda();
}
}
/**
* Finds and returns the maximum temperature difference between
* the current and next temperature arrays.
*/
REAL find_max_temp_diff() {
REAL max_diff = fabs(next_temp[0] - temp[0]);
REAL diff = 0.0;
for(int k = 0; k < num_slices; k++) {
for(int i = 0; i < num_rows; i++) {
for(int j = 0; j < num_cols; j++) {
diff = fabs(next_temp[k*num_rows*num_cols + i*num_cols + j] - temp[k*num_rows*num_cols + i*num_cols + j]);
if(diff > max_diff) {
max_diff = diff;
}
}
}
}
return max_diff;
}
/**
* Finds the index of a given x, y, and z value in meters and
* stores them in the index array
*/
void find_loc_index(REAL x_loc, REAL y_loc, REAL z_loc, int *index){
if(x_loc < 0) {
index[0] = -1;
}
else if(x_loc > max_dist_x) {
index[0] = num_cols;
}
else {
for(index[0] = 0; index[0] < num_cols; index[0]++) {
if(x_loc <= dist_x[index[0]]+dim_x[index[0]]/2.0) {
break;
}
}
}
if(y_loc < 0) {
index[1] = -1;
}
else if(y_loc > max_dist_y) {
index[1] = num_rows;
}
else {
for(index[1] = 0; index[1] < num_rows; index[1]++) {
if(y_loc <= dist_y[index[1]]+dim_y[index[1]]/2.0) {
break;
}
}
}
if(z_loc < 0) {
index[2] = -1;
}
else if(z_loc > max_dist_z) {
index[2] = num_slices;
}
else {
for(index[2] = 0; index[2] < num_slices; index[2]++) {
if(z_loc <= dist_z[index[2]]+dim_z[index[2]]/2.0) {
break;
}
}
}
}
/**
* Finds the indexes of two corners of the moving source
* if either falls within the model. The valid parts of the
* moving source are updated with the moving sources temperature
*/
void update_mvsrc(int index) {
if(mvsrc_valid[index] == 1) {
int loc_index[3], loc_offset_index[3];
find_loc_index(mvsrc_x[index],mvsrc_y[index],mvsrc_z[index],loc_index);
find_loc_index(mvsrc_x[index]+mvsrc_offset_x[index],mvsrc_y[index]+mvsrc_offset_y[index],mvsrc_z[index]+mvsrc_offset_z[index],loc_offset_index);
if((loc_index[0] >= 0 && loc_index[0] < num_cols && loc_index[1] >= 0 && loc_index[1] < num_rows && loc_index[2] >= 0 && loc_index[2] < num_slices) || (loc_offset_index[0] >= 0 && loc_offset_index[0] < num_cols && loc_offset_index[1] >= 0 && loc_offset_index[1] < num_rows && loc_offset_index[2] >= 0 && loc_offset_index[2] < num_slices)) {
for(int k = loc_index[2]; k <= loc_offset_index[2]; k++) {
for(int i = loc_index[1]; i <= loc_offset_index[1]; i++) {
for(int j = loc_index[0]; j <= loc_offset_index[0]; j++) {
if(i >= 0 && i < num_rows && j >= 0 && j < num_cols && k >= 0 && k < num_slices) {
temp[k*num_rows*num_cols + i*num_cols + j] = mvsrc_temp[index];
}
}
}
}
}
else {
mvsrc_valid[index] = 0;
}
}
}
/**
* Updates the moving sources velocity and position vectors
* then updates the temperatures in the current temp array
*/
void update_moving_sources() {
for(int i = 0; i < num_mvsrc; i++) {
mvsrc_vel_x[i] += mvsrc_accel_x[i]*time_step;
mvsrc_vel_y[i] += mvsrc_accel_y[i]*time_step;
mvsrc_vel_z[i] += mvsrc_accel_z[i]*time_step;
mvsrc_x[i] += mvsrc_vel_x[i]*time_step;
mvsrc_y[i] += mvsrc_vel_y[i]*time_step;
mvsrc_z[i] += mvsrc_vel_z[i]*time_step;
update_mvsrc(i);
}
}
/**
* Performs a finite heat flow simulation using
* conduction and convection.
*/
int main(int argc, char **argv) {
#ifdef DISPLAY
cout << "\t\t Finite Difference Heat Flow Simulation" << endl;
//Asks the user if they wish to visualize results
cout << endl << "Press 1 to run visualization, otherwise 0: ";
while(!(cin >> display_mode) || display_mode < 0 || display_mode > 1) {
clear_cin();
cout << "Incorrect input, to save the state of the model enter 1, else 0: ";
}
#else
cout << "\t\t Finite Difference Heat Flow Simulation" << endl;
#endif
int input_val; //Temporary int value
REAL temp_val; //Temporary REAL value
//Sets the current device
error = cudaSetDevice(0);
//Retrieves the properties of the device
error = cudaGetDeviceProperties(&deviceProp, 0);
if(error != cudaSuccess) {
cerr << endl << "Error while retrieving device properties" << endl;
cerr << error << ":" << cudaGetErrorString(error) << endl;
cerr << "Exiting the Program" << endl;
exit(1);
}
//Loads the input file for the simulation
load_file();
//Checks the total and used amount of device global memory after allocation
size_t free_memory; //Free memory on the device
size_t total_memory; //Total memory on the device
error = cudaMemGetInfo(&free_memory, &total_memory); //Retrieves the memory information for the device
if(error != cudaSuccess) {
cerr << endl << "Error while getting memory information" << endl;
cerr << error << ":" << cudaGetErrorString(error) << endl;
cerr << "Exiting the Program" << endl;
exit(1);
}
cout << "Free memory: "<< (unsigned int)free_memory << ", total memory: "<< (unsigned int)total_memory<<" (after initialization)" << endl;
/**
* Allows the user to change multiple rectangular blocks of temperatures
* within the model
*/
/*
cout << endl << endl << "To Change the Temp. on a Block, Enter 1, Else 0: ";
while(!(cin >> input_val) || input_val < 0 || input_val > 1) {
clear_cin();
cout << "Incorrect Input, Enter 1 to Change, Else 0: ";
}
//Warning, the row column pairs need to be space seperated not comma seperated
if(input_val == 1) {
int num_block, row1, row2, col1, col2, slice1, slice2;
REAL new_temp;
cout << "Enter the Number of Blocks to Change: ";
while(!(cin >> num_block) || num_block < 0) {
clear_cin();
cout << "Enter a number greater than or equal to 0: ";
}
for(int i = 0; i < num_block; i++) {
cout << endl << "Block " << i << endl;
cout << "Enter the Coordinates of the Upper Left Corner <row> <column> <slice>: ";
while(!(cin >> row1 >> col1 >> slice1) || row1 < 0 || col1 < 0 || slice1 < 0) {
clear_cin();
cout << "Incorrect input, enter three positive numbers with spaces: ";
}
cout << "Enter the Coordinates of the Lower Right Corner <row> <column> <slice>: ";
while(!(cin >> row2 >> col2 >> slice2) || row2 < row1 || col2 < col1 || slice2 < slice1) {
clear_cin();
cout << "Incorrect input, enter three positive numbers with spaces: ";
}
cout << endl << "Current Block Temps" << endl;
cout << setw(10) << "row" << " " << setw(10) << "col" << " " << setw(10) << "slice" << " " << setw(OUT_PRECISION+5) << "temp" << endl;
cout << setw(10) << row1 << " " << setw(10) << col1 << " " << setw(10) << slice1 << setw(OUT_PRECISION+5) << fixed << setprecision(OUT_PRECISION) << temp[slice1*num_rows*num_cols + row1*num_cols + col1] << endl;
cout << setw(10) << row2 << " " << setw(10) << col2 << " " << setw(10) << slice2 << setw(OUT_PRECISION+5) << temp[slice2*num_rows*num_cols + row2*num_cols + col2] << endl;
cout << "Enter a New Temperature For the Block: ";
while(!(cin >> new_temp)) {
clear_cin();
cout << "Incorrect input, enter a new temperature: ";
}
for(int i = row1; i < row2; i++) {
for(int j = col1; j < col2; j++) {
for(int k = slice1; k < slice2; k++) {
if(i >= 0 && i < num_rows && j >= 0 && j < num_cols && k >= 0 && k < num_slices) {
temp[k*num_rows*num_cols + i*num_cols + j] = new_temp;
}
}
}
}
}
}
*/
/**
* Allows the user to start one or more moving sources.
*/
using_moving_source = 0;
/*
cout << endl << endl << "To Start One or More Moving Sources Enter 1, Else Enter 0: ";
while(!(cin >> using_moving_source) || using_moving_source < 0 || using_moving_source > 1) {
clear_cin();
cout << "Incorrect Input, Enter 1 to Change, Else 0: ";
}
if(using_moving_source == 1) {
REAL mag, angle1, angle2;
cout << "Enter the number of moving sources: ";
while(!(cin >> num_mvsrc) || num_mvsrc <= 0) {
clear_cin();
cout << "Incorrect input, enter a number greater than 0: ";
}
mvsrc_x = new REAL[num_mvsrc];
mvsrc_y = new REAL[num_mvsrc];
mvsrc_z = new REAL[num_mvsrc];
mvsrc_offset_x = new REAL[num_mvsrc];
mvsrc_offset_y = new REAL[num_mvsrc];
mvsrc_offset_z = new REAL[num_mvsrc];
mvsrc_vel_x = new REAL[num_mvsrc];
mvsrc_vel_y = new REAL[num_mvsrc];
mvsrc_vel_z = new REAL[num_mvsrc];
mvsrc_accel_x = new REAL[num_mvsrc];
mvsrc_accel_y = new REAL[num_mvsrc];
mvsrc_accel_z = new REAL[num_mvsrc];
mvsrc_temp = new REAL[num_mvsrc];
mvsrc_valid = new int[num_mvsrc];
for(int i = 0; i < num_mvsrc; i++) {
cout << endl << "Moving source " << i << endl;
cout << "Valid coordinates are x=0-"<<max_dist_x<<" y=0-"<<max_dist_y<<" z=0-"<<max_dist_z<<":" << endl;
cout << "Enter the coordinates in meters for the corner closest to the origin, <x> <y> <z>: ";
while(!(cin >> mvsrc_x[i] >> mvsrc_y[i] >> mvsrc_z[i]) || mvsrc_x[i] < 0 || mvsrc_x[i] > max_dist_x || mvsrc_y[i] < 0 || mvsrc_y[i] > max_dist_y || mvsrc_z[i] < 0 || mvsrc_z[i] > max_dist_z) {
clear_cin();
cout << "Incorrect input, enter a valid coordinate between x=0-"<<max_dist_x<<" y=0-"<<max_dist_y<<" z=0-"<<max_dist_z<<":";
}
cout << "Valid sizes are x=0-"<<max_dist_x-mvsrc_x[i]<<" y=0-"<<max_dist_y-mvsrc_y[i]<<" z=0-"<<max_dist_z-mvsrc_z[i]<<":"<<endl;
cout << "Enter the size of the moving source in meters, <x size> <y size> <z size>: ";
while(!(cin >> mvsrc_offset_x[i] >> mvsrc_offset_y[i] >> mvsrc_offset_z[i]) || mvsrc_offset_x[i] <= 0 || mvsrc_offset_x[i] > max_dist_x-mvsrc_x[i] || mvsrc_offset_y[i] <= 0 || mvsrc_offset_y[i] > max_dist_y-mvsrc_y[i] || mvsrc_offset_z[i] <= 0 || mvsrc_offset_z[i] > max_dist_z-mvsrc_z[i]) {
clear_cin();
cout << "Incorrect input, enter a valid distance between x=0-"<<max_dist_x-mvsrc_x[i]<<" y=0-"<<max_dist_y-mvsrc_y[i]<<" z=0-"<<max_dist_z-mvsrc_z[i]<<":";
}
cout << "Enter the angle of the moving sources vector in degrees from positve x towards negative y (0-360): ";
while(!(cin >> angle1) || angle1 < 0 || angle1 > 360) {
clear_cin();
cout << "Incorrect input, enter a valid angle: ";
}
cout << "Enter the angle of the moving sources vector in degrees from positve z (0-180): ";
while(!(cin >> angle2) || angle2 < 0 || angle2 > 180) {
clear_cin();
cout << "Incorrect input, enter a valid angle: ";
}
cout << "Enter the magnitude of the velocity vector in m/year: ";
while(!(cin >> mag) || mag < 0) {
clear_cin();
cout << "Incorrect input, enter a velocity greater than 0: ";
}
mvsrc_vel_x[i] = mag*sin(angle2/180.0*M_PI)*cos(angle1/180.0*M_PI);
mvsrc_vel_y[i] = mag*sin(angle2/180.0*M_PI)*sin(angle1/180.0*M_PI);
mvsrc_vel_z[i] = mag*cos(angle2/180.0*M_PI);
cout << "Enter the magnitude of the acceleration vector in m/year^2: ";
while(!(cin >> mag) || mag < 0) {
clear_cin();
cout << "Incorrect input, enter an acceleration greater than 0: ";
}
mvsrc_accel_x[i] = mag*sin(angle2/180.0*M_PI)*cos(angle1/180.0*M_PI);
mvsrc_accel_y[i] = mag*sin(angle2/180.0*M_PI)*sin(angle1/180.0*M_PI);
mvsrc_accel_z[i] = mag*cos(angle2/180.0*M_PI);
cout << "Enter the temperature of the moving source: ";
while(!(cin >> mag)) {
clear_cin();
cout << "Incorrect input, enter a valid temperature: ";
}
mvsrc_temp[i] = mag;
mvsrc_valid[i] = 1;
update_mvsrc(i);
}
}
*/
//Allows the user to decrease the size of the time step
cout << endl << endl << "Each Iteration in Time Spans " << scientific << time_step << " Years" << endl;
cout << "Enter a Shorter Iteration Time in Years if Desired (any larger number otherwise): ";
while(!(cin >> temp_val) || temp_val <= 0) {
clear_cin();
cout << "Incorrect input, enter a number greater than 0: ";
}
if(temp_val < time_step) {
time_step = temp_val;
}
DHF = chf * QFAC * time_step;
//Calculates the number of convection loops to perform per time step
num_conv_loops = (int)(time_step/(10*tic));
if(num_conv_loops > 5) {
num_conv_loops = 5;
}
else if(num_conv_loops <= 0) {
num_conv_loops = 1;
}
//Calculates the time increment per convection loop
time_inc = time_step/num_conv_loops;
min_row_dim = 100.0;
for(int i = 0; i < num_rows; i++) {
if(dim_y[i] < min_row_dim) {
min_row_dim = dim_y[i];
}
}
//Asks the user for the runtime of the simulation
thermal_time_constant = min_row_dim*min_row_dim/max_thermal_conduct_diff;
cout << endl << endl << "The Thermal Time Constant for the Vertical Dimension is " << thermal_time_constant << " Years" << endl;
cout << "Enter Time Duration for Calculation in Years: ";
while(!(cin >> run_time) || run_time <= 0) {
clear_cin();
cout << "Incorrect input, enter a number greater than 0: ";
}
//Asks the user for the number of loops to perform between screen updates
cout << endl << endl << "Enter the Number of Loops Between Screen Updates: ";
while(!(cin >> num_loops) || num_loops <= 0) {
clear_cin();
cout << "Incorrect input, enter a number greater than 0: ";
}
use_tolerance = 0;
/*
cout << endl << endl << "To have the simulation stop once the temperature change meets a tolerance, Enter 1 otherwise 0: ";
while(!(cin >> use_tolerance) || use_tolerance < 0 || use_tolerance > 1) {
clear_cin();
cout << "Incorrect Input, Enter 1 to use a tolerance, Else 0: ";
}
if(use_tolerance == 1) {
cout << endl << "Enter the tolerance: ";
while(!(cin >> tolerance) || tolerance <= 0) {
clear_cin();
cout << "Incorrect input, enter a number greater than 0: ";
}
}
*/
//Initializes the simulation time to 0.0
sim_time = 0.0;
//Waits for the user to hit enter before beginning the simulation
cout << endl;
cin.ignore(numeric_limits <streamsize> ::max(), '\n' );
PressEnterToContinue();
/**
* The main loop of the simulation
*/
count = 0; //Number of loops performed
cout << endl << endl << num_loops << " loops between screen updates" << endl << endl;
if(use_tolerance == 0) {
cout << setw(15) << "num loops" << setw(20) << "run time (years)" << setw(20) << "sim time (years)" << endl;
}
else {
cout << setw(15) << "num loops" << setw(20) << "run time (years)" << setw(20) << "sim time (years)" << setw(20) << "Max temp diff" << endl;
}
#ifdef DISPLAY
if(display_mode == 1) {
array_minmax();
array_size = num_cols * num_rows;
color_field = new float[array_size * 3];
for (int i=0; i<array_size *3; i++) {
color_field[i] = 0.0;
}
glutInit(&argc, argv);
int windowWidth = glutGet(GLUT_SCREEN_WIDTH);
int windowHeight = glutGet(GLUT_SCREEN_HEIGHT);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowSize(windowWidth, windowHeight);
glutInitWindowPosition(0, 0);
glutCreateWindow("ARC Simulation");
glViewport(0, 0, windowWidth,windowHeight);
glEnable (GL_BLEND);
glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60, 1.77777f, 1.0, 20000.0);
glutDisplayFunc(display3D);
//glutMouseFunc(mouse_button);//Mouse motion and camera trans settings maintained for debugging
//glutMotionFunc(mouse_move);
glutKeyboardFunc(keyboard);
glutSpecialFunc(keyboardSpecial);
/*camera_trans[0] = -num_cols/2.0;
camera_trans[1] = num_rows/3.0;
camera_trans[2] = -num_rows*1.75*tan(28.0/180.0*M_PI);
camera_rot[0] = 28.0;
camera_trans_lag[0] = -num_cols/2.0;
camera_trans_lag[1] = num_rows/3.0;
camera_trans_lag[2] = -num_rows*1.75*tan(28.0/180.0*M_PI);
camera_rot_lag[0] = 28.0;
*/
//gluLookAt(num_cols/2.0,num_rows*0.1,num_rows,num_cols/2.0,-num_rows/3.0,0.0,0.0,1.0,0.0);
if(num_rows > num_cols) {
gluLookAt(num_cols/2.0,num_rows*0.1,num_rows,num_cols/2.0,-num_rows/3.0,0.0,0.0,1.0,0.0);
}
else {
gluLookAt(num_cols/2.0,num_rows*0.1,num_cols,num_cols/2.0,-num_rows/3.0,0.0,0.0,1.0,0.0);
}
glutMainLoop();
PressEnterToContinue();
}
else {
#endif
while(sim_time <= run_time) {
//Displays status information for the current loop
if(count%num_loops == 0) {
if(use_tolerance == 0) {
cout << setw(15) << count << setw(20) << fixed << setprecision(5) << sim_time << setw(20) << initial_time + sim_time << endl;
}
else {
cout << setw(15) << count << setw(20) << fixed << setprecision(5) << sim_time << setw(20) << initial_time + sim_time << setw(20) << max_temp_diff << endl;
}
//Saves the current state of the simulation if the save_state flag is set
if(save_state) {
save_model_state();
}
}
//Performs convection updates if the current simulation is using convection
if(using_convection) {
convection_cuda();
}
//Performs conduction calculations
conduction_cuda();
//Increments the simulation time and loop count
sim_time += time_step;
count++;
if(use_tolerance == 1) {
max_temp_diff = find_max_temp_diff();
if(max_temp_diff < tolerance) {
cout << "Maximum temperature change below the tolerance, stoping the simulation" << endl;
break;
}
}
//Updates the moving source
if(using_moving_source == 1) {
update_moving_sources();
}
}
//Saves the final result of the simulation
if(save_state == 1 || save_result == 1) {
save_model_state();
}
save_surfer();
//Waits for the user to hit enter before ending the simulation
cout << endl << "Simulation Complete" << endl;
PressEnterToContinue();
deallocate_memory();
deallocate_cuda_memory();
#ifdef DISPLAY
}
#endif
}
|
7d8eb8ab070b586d4f8ccccd0bc1ac60d4337052.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaRecvPost.hpp"
#include "conversions.hcu"
namespace PVCuda{
#ifdef PV_USE_CUDNN
#include <cudnn.h>
//Function to change PV representation to CUDNN representation
//Does 2 things: permutes the ordering from [outFeature, ny, nx, inFeature] to [outFeature, inFeature, ny, nx]
//Reshapes the matrix if manyScale > 1 to map different "many" kernels into feature dimension
//Coalesced in input
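//Illustrative mapping with hypothetical sizes (inFeatures = 3, nx = 4, ny = 2,
//manyScale = 1, no cropping): PV element (kOF,kY,kX,kIF) = (0,1,2,1) has
//kSrc = 1*(4*3) + 2*3 + 1 = 19 and maps to kDest = 1*(2*4) + 1*4 + 2 = 14
//in the cuDNN layout [outFeature, inFeature, ny, nx].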
__global__
void CudaPermutePVToCudnn(float* dest, float* src, int outFeatures, int ny, int nx, int inFeatures, int manyScaleX, int manyScaleY, int cropX, int cropY){
//parameter dimensions are in source PV format
int destNx = (nx-2*cropX)/manyScaleX;
int destNy = (ny-2*cropY)/manyScaleY;
int destInFeatures = inFeatures*manyScaleX*manyScaleY;
int kSrc = (blockIdx.x * blockDim.x) + threadIdx.x;
if(kSrc < outFeatures * ny * nx * inFeatures){
int kOF = kSrc/(ny*nx*inFeatures);
int kY = (kSrc % (ny*nx*inFeatures))/(nx*inFeatures);
int kX = (kSrc % (nx*inFeatures))/inFeatures;
int kIF = (kSrc % inFeatures);
//check if in bounds
if(kX < cropX || kX >= nx-cropX){
return;
}
else{
kX = kX - cropX;
}
if(kY < cropY || kY >= ny-cropY){
return;
}
else{
kY = kY - cropY;
}
//Recalculate x, y, and f based on manyScale
kIF = kIF + inFeatures * (kX % manyScaleX + (kY % manyScaleY) * manyScaleX);
kX = kX/manyScaleX;
kY = kY/manyScaleY;
int sOF = destInFeatures * destNy * destNx;
int sIF = destNy * destNx;
int sY = destNx;
int kDest = kOF * sOF + kIF * sIF + kY * sY + kX;
dest[kDest] = src[kSrc];
}
}
//Weights need to be reversed for cudnn
//No need to account for many because the PV representation matches with how gsyn was reshaped.
__global__
void CudaPermuteWeightsPVToCudnn(float* dest, float* src, int outFeatures, int ny, int nx, int inFeatures, int manyScaleX, int manyScaleY){
//Parameter dimensions are PV source dimensions
int kSrc = (blockIdx.x * blockDim.x) + threadIdx.x;
if(kSrc < outFeatures * manyScaleX * manyScaleY * ny * nx * inFeatures){
int kOF = kSrc/(ny*nx*inFeatures);
int kY = (kSrc % (ny*nx*inFeatures))/(nx*inFeatures);
int kX = (kSrc % (nx*inFeatures))/inFeatures;
int kIF = (kSrc % inFeatures);
int sOF = inFeatures * ny * nx;
int sIF = ny * nx;
int sY = nx;
int kDest = kOF * sOF + kIF * sIF + (ny-kY-1) * sY + (nx-kX-1);
dest[kDest] = src[kSrc];
}
}
__global__
void CudaPermuteCudnnToPV(float* dest, float* src, int outFeatures, int ny, int nx, int inFeatures, int manyScaleX, int manyScaleY){
//parameter dimensions are in dest PV format
int srcNx = nx/manyScaleX;
int srcNy = ny/manyScaleY;
int srcInFeatures = inFeatures*manyScaleX*manyScaleY;
int kDest = (blockIdx.x * blockDim.x) + threadIdx.x;
if(kDest < outFeatures * ny * nx * inFeatures){
int kOF = kDest/(ny*nx*inFeatures);
int kY = (kDest % (ny*nx*inFeatures))/(nx*inFeatures);
int kX = (kDest % (nx*inFeatures))/inFeatures;
int kIF = (kDest % inFeatures);
//Recalculate x, y, and f based on manyScale
kIF = kIF + inFeatures * (kX % manyScaleX + (kY % manyScaleY) * manyScaleX);
kX = kX/manyScaleX;
kY = kY/manyScaleY;
int sOF = srcInFeatures * srcNy * srcNx;
int sIF = srcNy * srcNx;
int sY = srcNx;
int kSrc = kOF * sOF + kIF * sIF + kY * sY + kX;
dest[kDest] = src[kSrc];
}
}
#endif // PV_USE_CUDNN
//Kernel code
__global__
void HyPerLayer_recv_post(recv_post_params params, int batch){
////Shared memory buffers are declared
extern __shared__ char sharedMem[];
__shared__ float* preBuffer;
__shared__ float* postBuffer;
__shared__ float* weightsBuffer;
postBuffer = (float*)sharedMem;
weightsBuffer = (float*)(&(postBuffer[params.postBufNum]));
if(params.preDataLocal){
preBuffer = (float*)(&(weightsBuffer[params.weightsBufNum]));
}
//Ordered this way because threads vary fastest in x, then y, then z
//Mapped to petavision order of f, x, and y
int localF = blockDim.x;
int localX = blockDim.y;
int localY = blockDim.z;
int localFIndex = threadIdx.x;
int localXIndex = threadIdx.y;
int localYIndex = threadIdx.z;
int fTargetRes = (blockIdx.x * blockDim.x) + threadIdx.x;
int xTargetRes = (blockIdx.y * blockDim.y) + threadIdx.y;
int yTargetRes = (blockIdx.z * blockDim.z) + threadIdx.z;
////Calculate kTargetRes based on x, y, and f
int kTargetRes = kIndex(xTargetRes, yTargetRes, fTargetRes, params.nxRes, params.nyRes, params.nf);
int kTargetExt = kIndexExtended(kTargetRes, params.nxRes, params.nyRes, params.nf, params.nblt, params.nbrt, params.nbdn, params.nbup);
//Each wIdx should be shared since each workgroup convolves one weight kernel
__shared__ int wIdx;
if(localXIndex == 0 && localYIndex == 0){
//Change restricted to extended post neuron
int kernelIndex;
if(params.sharedWeights == 1){
kernelIndex = params.patch2datalookuptable[kTargetExt];
}
else{
kernelIndex = kTargetExt;
}
wIdx = kernelIndex * params.nxp * params.nyp * params.nfp;
}
//Get top left most neuron in the group
__shared__ long localStartSourceExt;
long startSourceExt;
if(params.preDataLocal){
if(localXIndex == 0 && localYIndex == 0 && localFIndex == 0){
localStartSourceExt = params.startSourceExtBuf[kTargetRes];
}
}
else{
startSourceExt = params.startSourceExtBuf[kTargetRes];
}
int localIndex = kIndex(localXIndex, localYIndex, localFIndex, localX, localY, localF);
postBuffer[localIndex] = 0;
int numXfBuffer = params.localBufSizeX * params.nfp;
int numWeightsBuffer = params.nxp * params.nfp;
int xOffset = localXIndex * params.preToPostScaleX;
//int yOffset = localYIndex * params.preToPostScaleY;
int numCopyThreads = localF * localX * localY < warpSize ? localF * localX * localY : warpSize;
//Wait for shared memory loads
__syncthreads();
int preBatchOffset = batch * (params.preNx + params.preNblt + params.preNbrt) * (params.preNy + params.preNbup + params.preNbdn) * params.preNf;
for(int ky = 0; ky < params.nyp; ky++){
//Copy global to local, do this with all threads
if(params.preDataLocal){
//Pre buffer
if(localIndex < numCopyThreads){
for(int i = localIndex; i < numXfBuffer; i+= numCopyThreads){
preBuffer[i] = params.preData[preBatchOffset + localStartSourceExt + ky * params.sy + i];
}
}
}
//Weights
if(localIndex < numCopyThreads){
for(int i = localIndex; i < numWeightsBuffer; i+= numCopyThreads){
weightsBuffer[i] = params.weights[wIdx + ky * params.syp + i];
}
}
//The actual pre buffer index
__syncthreads();
float* activityY;
if(params.preDataLocal){
activityY = &(preBuffer[xOffset * params.nfp]);
}
else{
activityY = &(params.preData[preBatchOffset + startSourceExt + ky * params.sy]);
}
float* weightY = weightsBuffer;
//float* weightY = &(params.weights[wIdx + ky * params.syp]);
//Summing into post buffer indexed by localIndex
int k;
for (k = 0; k < params.numPerStride; k++) {
postBuffer[localIndex] += activityY[k]*weightY[k]*params.dt_factor;
}
__syncthreads();
}
////Sum into global memory
int postBatchOffset = batch * params.nxRes * params.nyRes * params.nf;
params.postGsyn[postBatchOffset + kTargetRes] += postBuffer[localIndex];
}
#ifdef PV_USE_CUDNN
void CudaRecvPost::callPermuteDatastorePVToCudnnKernel(int gridSize, int blockSize, int nbatch, int ny, int nx, int nf) {
//Datastore will never get reshaped, so manyScale will always be 1
hipLaunchKernelGGL(( CudaPermutePVToCudnn), dim3(gridSize), dim3(blockSize), 0, device->getStream(), params.cudnn_preData, params.preData, nbatch, ny, nx, nf, 1, 1, params.diffX, params.diffY);
}
void CudaRecvPost::callPermuteGSynPVToCudnnKernel(int gridSize, int blockSize, float* gSynPatchHead, int nbatch, int ny, int nx, int nf) {
hipLaunchKernelGGL(( CudaPermutePVToCudnn), dim3(gridSize), dim3(blockSize), 0, device->getStream(), params.cudnn_gSyn, gSynPatchHead, nbatch, ny, nx, nf, params.manyScaleX, params.manyScaleY, 0, 0);
}
void CudaRecvPost::callPermuteGSynCudnnToPVKernel(int gridSize, int blockSize, float* gSynPatchHead, int nbatch, int ny, int nx, int nf) {
hipLaunchKernelGGL(( CudaPermuteCudnnToPV), dim3(gridSize), dim3(blockSize), 0, device->getStream(), gSynPatchHead, params.cudnn_gSyn, nbatch, ny, nx, nf, params.manyScaleX, params.manyScaleY);
}
#endif // PV_USE_CUDNN
} // end namespace PVCuda
|
7d8eb8ab070b586d4f8ccccd0bc1ac60d4337052.cu
|
#include "CudaRecvPost.hpp"
#include "conversions.hcu"
namespace PVCuda{
#ifdef PV_USE_CUDNN
#include <cudnn.h>
//Function to change PV representation to CUDNN representation
//Does 2 things: permute ordering from [outFeature, ny, nx, inFeature] to [outFeature, inFeature, ny, nx]
//Reshapes the matrix if manyScale > 1 to map different "many" kernels into feature dimension
//Coalesced in input
__global__
void CudaPermutePVToCudnn(float* dest, float* src, int outFeatures, int ny, int nx, int inFeatures, int manyScaleX, int manyScaleY, int cropX, int cropY){
//parameter dimensions are in source PV format
int destNx = (nx-2*cropX)/manyScaleX;
int destNy = (ny-2*cropY)/manyScaleY;
int destInFeatures = inFeatures*manyScaleX*manyScaleY;
int kSrc = (blockIdx.x * blockDim.x) + threadIdx.x;
if(kSrc < outFeatures * ny * nx * inFeatures){
int kOF = kSrc/(ny*nx*inFeatures);
int kY = (kSrc % (ny*nx*inFeatures))/(nx*inFeatures);
int kX = (kSrc % (nx*inFeatures))/inFeatures;
int kIF = (kSrc % inFeatures);
//check if in bounds
if(kX < cropX || kX >= nx-cropX){
return;
}
else{
kX = kX - cropX;
}
if(kY < cropY || kY >= ny-cropY){
return;
}
else{
kY = kY - cropY;
}
//Recalculate x, y, and f based on manyScale
kIF = kIF + inFeatures * (kX % manyScaleX + (kY % manyScaleY) * manyScaleX);
kX = kX/manyScaleX;
kY = kY/manyScaleY;
int sOF = destInFeatures * destNy * destNx;
int sIF = destNy * destNx;
int sY = destNx;
int kDest = kOF * sOF + kIF * sIF + kY * sY + kX;
dest[kDest] = src[kSrc];
}
}
//Weights need to be reversed for cudnn
//No need to account for many because the PV representation matches with how gsyn was reshaped.
__global__
void CudaPermuteWeightsPVToCudnn(float* dest, float* src, int outFeatures, int ny, int nx, int inFeatures, int manyScaleX, int manyScaleY){
//Parameter dimensions are PV source dimensions
int kSrc = (blockIdx.x * blockDim.x) + threadIdx.x;
if(kSrc < outFeatures * manyScaleX * manyScaleY * ny * nx * inFeatures){
int kOF = kSrc/(ny*nx*inFeatures);
int kY = (kSrc % (ny*nx*inFeatures))/(nx*inFeatures);
int kX = (kSrc % (nx*inFeatures))/inFeatures;
int kIF = (kSrc % inFeatures);
int sOF = inFeatures * ny * nx;
int sIF = ny * nx;
int sY = nx;
int kDest = kOF * sOF + kIF * sIF + (ny-kY-1) * sY + (nx-kX-1);
dest[kDest] = src[kSrc];
}
}
__global__
void CudaPermuteCudnnToPV(float* dest, float* src, int outFeatures, int ny, int nx, int inFeatures, int manyScaleX, int manyScaleY){
//parameter dimensions are in dest PV format
int srcNx = nx/manyScaleX;
int srcNy = ny/manyScaleY;
int srcInFeatures = inFeatures*manyScaleX*manyScaleY;
int kDest = (blockIdx.x * blockDim.x) + threadIdx.x;
if(kDest < outFeatures * ny * nx * inFeatures){
int kOF = kDest/(ny*nx*inFeatures);
int kY = (kDest % (ny*nx*inFeatures))/(nx*inFeatures);
int kX = (kDest % (nx*inFeatures))/inFeatures;
int kIF = (kDest % inFeatures);
//Recalculate x, y, and f based on manyScale
kIF = kIF + inFeatures * (kX % manyScaleX + (kY % manyScaleY) * manyScaleX);
kX = kX/manyScaleX;
kY = kY/manyScaleY;
int sOF = srcInFeatures * srcNy * srcNx;
int sIF = srcNy * srcNx;
int sY = srcNx;
int kSrc = kOF * sOF + kIF * sIF + kY * sY + kX;
dest[kDest] = src[kSrc];
}
}
#endif // PV_USE_CUDNN
//Kernel code
__global__
void HyPerLayer_recv_post(recv_post_params params, int batch){
////Shared memory buffers are declared
extern __shared__ char sharedMem[];
__shared__ float* preBuffer;
__shared__ float* postBuffer;
__shared__ float* weightsBuffer;
postBuffer = (float*)sharedMem;
weightsBuffer = (float*)(&(postBuffer[params.postBufNum]));
if(params.preDataLocal){
preBuffer = (float*)(&(weightsBuffer[params.weightsBufNum]));
}
//Ordered this way because threads vary fastest in x, then y, then z
//Mapped to petavision order of f, x, and y
int localF = blockDim.x;
int localX = blockDim.y;
int localY = blockDim.z;
int localFIndex = threadIdx.x;
int localXIndex = threadIdx.y;
int localYIndex = threadIdx.z;
int fTargetRes = (blockIdx.x * blockDim.x) + threadIdx.x;
int xTargetRes = (blockIdx.y * blockDim.y) + threadIdx.y;
int yTargetRes = (blockIdx.z * blockDim.z) + threadIdx.z;
////Calculate kTargetRes based on x, y, and f
int kTargetRes = kIndex(xTargetRes, yTargetRes, fTargetRes, params.nxRes, params.nyRes, params.nf);
int kTargetExt = kIndexExtended(kTargetRes, params.nxRes, params.nyRes, params.nf, params.nblt, params.nbrt, params.nbdn, params.nbup);
//Each wIdx should be shared since each workgroup convolves one weight kernel
__shared__ int wIdx;
if(localXIndex == 0 && localYIndex == 0){
//Change restricted to extended post neuron
int kernelIndex;
if(params.sharedWeights == 1){
kernelIndex = params.patch2datalookuptable[kTargetExt];
}
else{
kernelIndex = kTargetExt;
}
wIdx = kernelIndex * params.nxp * params.nyp * params.nfp;
}
//Get top left most neuron in the group
__shared__ long localStartSourceExt;
long startSourceExt;
if(params.preDataLocal){
if(localXIndex == 0 && localYIndex == 0 && localFIndex == 0){
localStartSourceExt = params.startSourceExtBuf[kTargetRes];
}
}
else{
startSourceExt = params.startSourceExtBuf[kTargetRes];
}
int localIndex = kIndex(localXIndex, localYIndex, localFIndex, localX, localY, localF);
postBuffer[localIndex] = 0;
int numXfBuffer = params.localBufSizeX * params.nfp;
int numWeightsBuffer = params.nxp * params.nfp;
int xOffset = localXIndex * params.preToPostScaleX;
//int yOffset = localYIndex * params.preToPostScaleY;
int numCopyThreads = localF * localX * localY < warpSize ? localF * localX * localY : warpSize;
//Wait for shared memory loads
__syncthreads();
int preBatchOffset = batch * (params.preNx + params.preNblt + params.preNbrt) * (params.preNy + params.preNbup + params.preNbdn) * params.preNf;
for(int ky = 0; ky < params.nyp; ky++){
//Copy global to local, do this with all threads
if(params.preDataLocal){
//Pre buffer
if(localIndex < numCopyThreads){
for(int i = localIndex; i < numXfBuffer; i+= numCopyThreads){
preBuffer[i] = params.preData[preBatchOffset + localStartSourceExt + ky * params.sy + i];
}
}
}
//Weights
if(localIndex < numCopyThreads){
for(int i = localIndex; i < numWeightsBuffer; i+= numCopyThreads){
weightsBuffer[i] = params.weights[wIdx + ky * params.syp + i];
}
}
//The actual pre buffer index
__syncthreads();
float* activityY;
if(params.preDataLocal){
activityY = &(preBuffer[xOffset * params.nfp]);
}
else{
activityY = &(params.preData[preBatchOffset + startSourceExt + ky * params.sy]);
}
float* weightY = weightsBuffer;
//float* weightY = &(params.weights[wIdx + ky * params.syp]);
//Summing into post buffer indexed by localIndex
int k;
for (k = 0; k < params.numPerStride; k++) {
postBuffer[localIndex] += activityY[k]*weightY[k]*params.dt_factor;
}
__syncthreads();
}
////Sum into global memory
int postBatchOffset = batch * params.nxRes * params.nyRes * params.nf;
params.postGsyn[postBatchOffset + kTargetRes] += postBuffer[localIndex];
}
#ifdef PV_USE_CUDNN
void CudaRecvPost::callPermuteDatastorePVToCudnnKernel(int gridSize, int blockSize, int nbatch, int ny, int nx, int nf) {
//Datastore will never get reshaped, so manyScale will always be 1
CudaPermutePVToCudnn<<<gridSize, blockSize, 0, device->getStream()>>>(params.cudnn_preData, params.preData, nbatch, ny, nx, nf, 1, 1, params.diffX, params.diffY);
}
void CudaRecvPost::callPermuteGSynPVToCudnnKernel(int gridSize, int blockSize, float* gSynPatchHead, int nbatch, int ny, int nx, int nf) {
CudaPermutePVToCudnn<<<gridSize, blockSize, 0, device->getStream()>>>(params.cudnn_gSyn, gSynPatchHead, nbatch, ny, nx, nf, params.manyScaleX, params.manyScaleY, 0, 0);
}
void CudaRecvPost::callPermuteGSynCudnnToPVKernel(int gridSize, int blockSize, float* gSynPatchHead, int nbatch, int ny, int nx, int nf) {
CudaPermuteCudnnToPV<<<gridSize, blockSize, 0, device->getStream()>>>(gSynPatchHead, params.cudnn_gSyn, nbatch, ny, nx, nf, params.manyScaleX, params.manyScaleY);
}
#endif // PV_USE_CUDNN
} // end namespace PVCuda
|
f286fa174b0abd2e1eebd88ae88cbce9b12955b1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<fstream>
#include<stdio.h>
#include<cstdlib>
#include<ctime>
#include<qapio.h>
#include<2optlib.h>
using namespace std;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) {
if (code != hipSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/*
 * use basic formula to compute cost of a permutation
*/
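/*
 * Sketch of the objective (standard QAP notation): for a permutation p the value computed below is
 *   cost(p) = sum over ordered pairs (i, j), i != j, of flow[p(i)][p(j)] * dist[i][j];
 * the two loop nests cover the i < j and i > j halves of that sum.
 */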
__device__ int initCost(int *d_flows, int *d_dist, int *d_sol, int nsize, int tidx) {
int calcost = 0;
int index = tidx * nsize;
for(int i = 0; i < nsize - 1; i++) {
for(int j = i + 1; j < nsize; j++)
calcost = calcost
+ (d_flows[(d_sol[index + i] - 1) * nsize + (d_sol[index + j] - 1)])
* d_dist[i * nsize + j];
}
for(int k = 1; k < nsize; k++) {
for(int l = 0; l < k;l++)
calcost = calcost
+ d_flows[(d_sol[index + k] - 1) * nsize + (d_sol[index + l]- 1)]
* d_dist[k * nsize + l];
}
return calcost;
}
/*
* compute cost of a permutation based on Burkard
*/
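/*
 * Sketch of the 2-exchange delta evaluated here: swapping the facilities at locations i and j
 * changes the cost by
 *   delta = (d[j][i]-d[i][j])*(f[p(i)][p(j)]-f[p(j)][p(i)])
 *         + (d[j][j]-d[i][i])*(f[p(i)][p(i)]-f[p(j)][p(j)])
 *         + sum_{k != i,j} of (d[j][k]-d[i][k])*(f[p(i)][p(k)]-f[p(j)][p(k)])
 *                           + (d[k][j]-d[k][i])*(f[p(k)][p(i)]-f[p(k)][p(j)]).
 * neighborCost() returns only the summation term; the caller (twoOpt) adds the first two terms
 * as dcost and ecost.
 */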
__device__ int neighborCost(int *d_flows, int *d_dist, int *d_sol, int nsize, int tidx, int i, int j) {
int offset = tidx * nsize;
int iUnit = d_sol[offset + i];
int jUnit = d_sol[offset + j];
int ccost = 0, gcost = 0, hcost = 0;
for(int k = 0; k < nsize; k++) {
int kUnit = d_sol[offset + k];
if (k != i && k != j) {
gcost = (d_dist[j * nsize + k] - d_dist[i * nsize + k]) *
(d_flows[(iUnit - 1) * nsize + (kUnit - 1)] - d_flows[(jUnit-1) * nsize + (kUnit - 1)]);
hcost = (d_dist[k * nsize + j] - d_dist[k * nsize + i]) *
(d_flows[(kUnit - 1) * nsize + (iUnit - 1)] - d_flows[(kUnit - 1) * nsize + (jUnit - 1)]);
ccost = ccost + (gcost + hcost);
}
}
return ccost;
}
/*
* copy src permutation to dest permutation
*/
__device__ void copy(int *dest, int *src, int size, int tidx) {
int offset = tidx * size;
for(int i = 0; i < size; i++)
dest[offset + i] = src[offset + i];
return;
}
/*
* swap units
*/
__device__ void swap(int *a,int *b) {
int temp=0;
temp = *a;
*a = *b;
*b = temp;
}
__global__ void twoOpt(int *d_dist,int *d_flows,int *d_sol, int nsize, int row,
int *d_result,int *d_bestsofar,int *d_bestcostsofar,int *d_newarray) {
int tidx = threadIdx.x + blockDim.x * blockIdx.x;
   // number of initial solutions should equal number of threads
// if tidx > number of solutions, then something is wrong
if (tidx >= row)
return;
int index = tidx * nsize;
int dcost = 0, ecost = 0;
int delta = 0;
int tcost;
// calculate cost of initial solution
d_result[tidx] = initCost(d_flows, d_dist, d_sol, nsize, tidx);
d_bestcostsofar[tidx] = d_result[tidx];
copy(d_bestsofar,d_sol,nsize,tidx);
   // search nsize successive 2-exchange neighborhoods
for(int n = 0; n < nsize; n++) {
      // loop-nest determines number of neighboring permutations evaluated
      // for size = n, number of evals = (n - 1) + (n - 2) + (n - 3) + ... + 1 = n(n - 1)/2
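      // e.g. for size = 4: 3 + 2 + 1 = 6 swaps are evaluated per pass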
for(int k = 0; k < nsize; k++) {
for(int j = k + 1; j < nsize; j++) {
            // generate neighboring permutation by swapping a pair of units
copy(d_newarray, d_sol, nsize, tidx);
swap(&d_newarray[tidx * nsize + k], &d_newarray[tidx * nsize + j]);
int kUnit = d_sol[index + k] - 1;
int jUnit = d_sol[index + j] - 1;
// calculate cost of neighbor
dcost = (d_dist[j * nsize + k] - d_dist[k * nsize + j])
* (d_flows[kUnit * nsize + jUnit] - d_flows[jUnit * nsize + kUnit]);
ecost = (d_dist[j * nsize + j] - d_dist[k * nsize + k])
* (d_flows[kUnit * nsize + kUnit] - d_flows[jUnit * nsize + jUnit]);
delta = dcost + ecost + neighborCost(d_flows, d_dist, d_sol, nsize, tidx, k, j);
tcost = d_result[tidx] + delta;
            // update results if a permutation with lower cost is found
if(tcost < d_bestcostsofar[tidx]) {
d_bestcostsofar[tidx] = tcost;
copy(d_bestsofar, d_newarray, nsize, tidx);
}
}
}
// best solution is center of next neighborhood
copy(d_sol, d_bestsofar, nsize, tidx);
d_result[tidx] = d_bestcostsofar[tidx];
}
return;
}
int main(int argc,char *argv[]) {
#ifdef PROFILE
clock_t cpu_startTime, cpu_endTime;
double cpu_ElapseTime=0;
cpu_startTime = clock();
#endif
if (argc != 4) {
cout << "usage: " << endl;
cout << "\t./2opt datafile solns randseed" << endl;
exit(1);
}
#ifdef DEBUG
cout <<"input file name: "<< argv[1] << endl;
cout <<"initial solutions: " << argv[2] << endl;
cout <<"seed value: " << argv[3] << endl;
#endif
string filename = argv[1];
int solns = atoi(argv[2]);
int iseed = atoi(argv[3]);
// read data from file
int size;
int **array;
readData(filename, &array, &size);
// split and flatten matrix
int *h_flows, *h_dist;
splitAndFlattenInt(&h_flows, &h_dist, array, size);
#ifdef DEBUG
cout << "problem size:" << size << endl;
#endif
#ifdef VDEBUG
printFlattenedArray(h_dist, size, size, "distances:");
printFlattenedArray(h_flows, size, size, "flows:");
#endif
if (iseed == 0)
srand(time(NULL));
else
srand(iseed);
int *h_sol;
genInitSolutions(&h_sol, size, solns);
#ifdef VDEBUG
printFlattenedArray(h_sol, size, size, "initial solutions:");
#endif
#ifdef PROFILE
hipEvent_t start , stop;
float ctime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
#endif
// allocate GPU memory
int *d_result = NULL;
int *d_bestsofar = NULL, *d_bestcostsofar = NULL, *d_newarray = NULL;
int *d_dist = NULL,*d_flows = NULL ,*d_sol = NULL;
gpuErrchk(hipMalloc((void **) &d_bestcostsofar, solns * sizeof(int)));
gpuErrchk(hipMalloc((void **) &d_bestsofar, solns * size * sizeof(int)));
gpuErrchk(hipMalloc((void **) &d_newarray, solns * size * sizeof(int)));
gpuErrchk(hipMalloc((void **) &d_result, solns * sizeof(int)));
gpuErrchk(hipMalloc((void **) &d_dist, size * size * sizeof(int)));
gpuErrchk(hipMalloc((void **) &d_flows, size * size * sizeof(int)));
gpuErrchk(hipMalloc((void **) &d_sol, solns * size * sizeof(int)));
   // allocate CPU memory
int *h_result = (int *) malloc(solns * sizeof(int));
int *h_bestsofar = (int *) malloc(solns * size * sizeof(int));
int *h_bestcostsofar = (int *) malloc(solns * sizeof(int));
// copy host data to device
gpuErrchk(hipMemcpy(d_dist,h_dist,size*size*sizeof(int),hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_flows,h_flows,size*size*sizeof(int),hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_sol,h_sol,solns*size*sizeof(int),hipMemcpyHostToDevice));
// determine thread configurations
int threadsPerBlock = 1024;
   // round up so that a solution count that is not a multiple of threadsPerBlock is still covered
   int blockPerGrid = (solns + threadsPerBlock - 1) / threadsPerBlock;
#if DEBUG
cout << "blocks: " << blockPerGrid << " " << endl;
cout << "threads: " << threadsPerBlock << " " << endl;
#endif
hipLaunchKernelGGL(( twoOpt), dim3(blockPerGrid), dim3(threadsPerBlock) , 0, 0, d_dist, d_flows, d_sol, size, solns,
d_result, d_bestsofar, d_bestcostsofar, d_newarray);
// copy device data to host
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipMemcpy(h_bestcostsofar, d_bestcostsofar, solns * sizeof(int),hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(h_bestsofar, d_bestsofar, solns * size * sizeof(int),hipMemcpyDeviceToHost));
#ifdef PROFILE
hipEventRecord(stop,0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
#endif
#ifdef VDEBUG
printFlattenedArray(h_bestsofar, solns, size, "Best permutations");
printRegularArray(h_bestcostsofar, solns, "Best costs:");
#endif
   // find the best among all the solutions returned
int minIndex = 0;
int minCost = findMin(h_bestcostsofar, solns, &minIndex);
// print results
cout << "problem size: " << size << endl;
cout << "best cost: " << minCost << endl;
cout << "best solution: " ;
for(int i = 0; i < size; i++)
cout << h_bestsofar[minIndex * size + i] << " ";
cout << endl;
#ifdef PROFILE
cpu_endTime = clock();
cpu_ElapseTime= ((cpu_endTime - cpu_startTime) / (double) CLOCKS_PER_SEC);
fprintf(stdout, "total exec time (s):\t %2.2f\n", cpu_ElapseTime);
hipEventElapsedTime(&ctime, start , stop);
fprintf(stdout, "kernel exec:\t %2.2f\n", (ctime / 1000));
hipEventDestroy(start);
hipEventDestroy(stop);
#endif
free(array);
free(h_dist);
free(h_flows);
free(h_sol);
free(h_result);
free(h_bestsofar);
free(h_bestcostsofar);
hipFree(d_dist);
hipFree(d_flows);
hipFree(d_sol);
hipFree(d_bestcostsofar);
hipFree(d_newarray);
hipFree(d_result);
hipFree(d_bestsofar);
return 0;
}
|
f286fa174b0abd2e1eebd88ae88cbce9b12955b1.cu
|
#include<iostream>
#include<fstream>
#include<stdio.h>
#include<cstdlib>
#include<ctime>
#include<qapio.h>
#include<2optlib.h>
using namespace std;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/*
 * use basic formula to compute cost of a permutation
*/
__device__ int initCost(int *d_flows, int *d_dist, int *d_sol, int nsize, int tidx) {
int calcost = 0;
int index = tidx * nsize;
for(int i = 0; i < nsize - 1; i++) {
for(int j = i + 1; j < nsize; j++)
calcost = calcost
+ (d_flows[(d_sol[index + i] - 1) * nsize + (d_sol[index + j] - 1)])
* d_dist[i * nsize + j];
}
for(int k = 1; k < nsize; k++) {
for(int l = 0; l < k;l++)
calcost = calcost
+ d_flows[(d_sol[index + k] - 1) * nsize + (d_sol[index + l]- 1)]
* d_dist[k * nsize + l];
}
return calcost;
}
/*
* compute cost of a permutation based on Burkard
*/
__device__ int neighborCost(int *d_flows, int *d_dist, int *d_sol, int nsize, int tidx, int i, int j) {
int offset = tidx * nsize;
int iUnit = d_sol[offset + i];
int jUnit = d_sol[offset + j];
int ccost = 0, gcost = 0, hcost = 0;
for(int k = 0; k < nsize; k++) {
int kUnit = d_sol[offset + k];
if (k != i && k != j) {
gcost = (d_dist[j * nsize + k] - d_dist[i * nsize + k]) *
(d_flows[(iUnit - 1) * nsize + (kUnit - 1)] - d_flows[(jUnit-1) * nsize + (kUnit - 1)]);
hcost = (d_dist[k * nsize + j] - d_dist[k * nsize + i]) *
(d_flows[(kUnit - 1) * nsize + (iUnit - 1)] - d_flows[(kUnit - 1) * nsize + (jUnit - 1)]);
ccost = ccost + (gcost + hcost);
}
}
return ccost;
}
/*
* copy src permutation to dest permutation
*/
__device__ void copy(int *dest, int *src, int size, int tidx) {
int offset = tidx * size;
for(int i = 0; i < size; i++)
dest[offset + i] = src[offset + i];
return;
}
/*
* swap units
*/
__device__ void swap(int *a,int *b) {
int temp=0;
temp = *a;
*a = *b;
*b = temp;
}
__global__ void twoOpt(int *d_dist,int *d_flows,int *d_sol, int nsize, int row,
int *d_result,int *d_bestsofar,int *d_bestcostsofar,int *d_newarray) {
int tidx = threadIdx.x + blockDim.x * blockIdx.x;
   // number of initial solutions should equal number of threads
// if tidx > number of solutions, then something is wrong
if (tidx >= row)
return;
int index = tidx * nsize;
int dcost = 0, ecost = 0;
int delta = 0;
int tcost;
// calculate cost of initial solution
d_result[tidx] = initCost(d_flows, d_dist, d_sol, nsize, tidx);
d_bestcostsofar[tidx] = d_result[tidx];
copy(d_bestsofar,d_sol,nsize,tidx);
   // search nsize successive 2-exchange neighborhoods
for(int n = 0; n < nsize; n++) {
      // loop-nest determines number of neighboring permutations evaluated
      // for size = n, number of evals = (n - 1) + (n - 2) + (n - 3) + ... + 1 = n(n - 1)/2
for(int k = 0; k < nsize; k++) {
for(int j = k + 1; j < nsize; j++) {
            // generate neighboring permutation by swapping a pair of units
copy(d_newarray, d_sol, nsize, tidx);
swap(&d_newarray[tidx * nsize + k], &d_newarray[tidx * nsize + j]);
int kUnit = d_sol[index + k] - 1;
int jUnit = d_sol[index + j] - 1;
// calculate cost of neighbor
dcost = (d_dist[j * nsize + k] - d_dist[k * nsize + j])
* (d_flows[kUnit * nsize + jUnit] - d_flows[jUnit * nsize + kUnit]);
ecost = (d_dist[j * nsize + j] - d_dist[k * nsize + k])
* (d_flows[kUnit * nsize + kUnit] - d_flows[jUnit * nsize + jUnit]);
delta = dcost + ecost + neighborCost(d_flows, d_dist, d_sol, nsize, tidx, k, j);
tcost = d_result[tidx] + delta;
            // update results if a permutation with lower cost is found
if(tcost < d_bestcostsofar[tidx]) {
d_bestcostsofar[tidx] = tcost;
copy(d_bestsofar, d_newarray, nsize, tidx);
}
}
}
// best solution is center of next neighborhood
copy(d_sol, d_bestsofar, nsize, tidx);
d_result[tidx] = d_bestcostsofar[tidx];
}
return;
}
int main(int argc,char *argv[]) {
#ifdef PROFILE
clock_t cpu_startTime, cpu_endTime;
double cpu_ElapseTime=0;
cpu_startTime = clock();
#endif
if (argc != 4) {
cout << "usage: " << endl;
cout << "\t./2opt datafile solns randseed" << endl;
exit(1);
}
#ifdef DEBUG
cout <<"input file name: "<< argv[1] << endl;
cout <<"initial solutions: " << argv[2] << endl;
cout <<"seed value: " << argv[3] << endl;
#endif
string filename = argv[1];
int solns = atoi(argv[2]);
int iseed = atoi(argv[3]);
// read data from file
int size;
int **array;
readData(filename, &array, &size);
// split and flatten matrix
int *h_flows, *h_dist;
splitAndFlattenInt(&h_flows, &h_dist, array, size);
#ifdef DEBUG
cout << "problem size:" << size << endl;
#endif
#ifdef VDEBUG
printFlattenedArray(h_dist, size, size, "distances:");
printFlattenedArray(h_flows, size, size, "flows:");
#endif
if (iseed == 0)
srand(time(NULL));
else
srand(iseed);
int *h_sol;
genInitSolutions(&h_sol, size, solns);
#ifdef VDEBUG
printFlattenedArray(h_sol, size, size, "initial solutions:");
#endif
#ifdef PROFILE
cudaEvent_t start , stop;
float ctime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
#endif
// allocate GPU memory
int *d_result = NULL;
int *d_bestsofar = NULL, *d_bestcostsofar = NULL, *d_newarray = NULL;
int *d_dist = NULL,*d_flows = NULL ,*d_sol = NULL;
gpuErrchk(cudaMalloc((void **) &d_bestcostsofar, solns * sizeof(int)));
gpuErrchk(cudaMalloc((void **) &d_bestsofar, solns * size * sizeof(int)));
gpuErrchk(cudaMalloc((void **) &d_newarray, solns * size * sizeof(int)));
gpuErrchk(cudaMalloc((void **) &d_result, solns * sizeof(int)));
gpuErrchk(cudaMalloc((void **) &d_dist, size * size * sizeof(int)));
gpuErrchk(cudaMalloc((void **) &d_flows, size * size * sizeof(int)));
gpuErrchk(cudaMalloc((void **) &d_sol, solns * size * sizeof(int)));
   // allocate CPU memory
int *h_result = (int *) malloc(solns * sizeof(int));
int *h_bestsofar = (int *) malloc(solns * size * sizeof(int));
int *h_bestcostsofar = (int *) malloc(solns * sizeof(int));
// copy host data to device
gpuErrchk(cudaMemcpy(d_dist,h_dist,size*size*sizeof(int),cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_flows,h_flows,size*size*sizeof(int),cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_sol,h_sol,solns*size*sizeof(int),cudaMemcpyHostToDevice));
// determine thread configurations
int threadsPerBlock = 1024;
   // round up so that a solution count that is not a multiple of threadsPerBlock is still covered
   int blockPerGrid = (solns + threadsPerBlock - 1) / threadsPerBlock;
#if DEBUG
cout << "blocks: " << blockPerGrid << " " << endl;
cout << "threads: " << threadsPerBlock << " " << endl;
#endif
twoOpt<<< blockPerGrid, threadsPerBlock >>>(d_dist, d_flows, d_sol, size, solns,
d_result, d_bestsofar, d_bestcostsofar, d_newarray);
// copy device data to host
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaMemcpy(h_bestcostsofar, d_bestcostsofar, solns * sizeof(int),cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(h_bestsofar, d_bestsofar, solns * size * sizeof(int),cudaMemcpyDeviceToHost));
#ifdef PROFILE
cudaEventRecord(stop,0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
#endif
#ifdef VDEBUG
printFlattenedArray(h_bestsofar, solns, size, "Best permutations");
printRegularArray(h_bestcostsofar, solns, "Best costs:");
#endif
   // find the best among all the solutions returned
int minIndex = 0;
int minCost = findMin(h_bestcostsofar, solns, &minIndex);
// print results
cout << "problem size: " << size << endl;
cout << "best cost: " << minCost << endl;
cout << "best solution: " ;
for(int i = 0; i < size; i++)
cout << h_bestsofar[minIndex * size + i] << " ";
cout << endl;
#ifdef PROFILE
cpu_endTime = clock();
cpu_ElapseTime= ((cpu_endTime - cpu_startTime) / (double) CLOCKS_PER_SEC);
fprintf(stdout, "total exec time (s):\t %2.2f\n", cpu_ElapseTime);
cudaEventElapsedTime(&ctime, start , stop);
fprintf(stdout, "kernel exec:\t %2.2f\n", (ctime / 1000));
cudaEventDestroy(start);
cudaEventDestroy(stop);
#endif
free(array);
free(h_dist);
free(h_flows);
free(h_sol);
free(h_result);
free(h_bestsofar);
free(h_bestcostsofar);
cudaFree(d_dist);
cudaFree(d_flows);
cudaFree(d_sol);
cudaFree(d_bestcostsofar);
cudaFree(d_newarray);
cudaFree(d_result);
cudaFree(d_bestsofar);
return 0;
}
|
c4adbdd1214dd4023cc7765fa749d76854edb829.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
src/cuda/common.cpp -- CUDA backend (wrapper routines)
Enoki is a C++ template library that enables transparent vectorization
of numerical kernels using SIMD instruction sets available on current
processor architectures.
    Copyright (c) 2019 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
#include "common_hip.cuh"
NAMESPACE_BEGIN(enoki)
std::string mem_string(size_t size) {
const char *orders[] = {
"B", "KiB", "MiB", "GiB",
"TiB", "PiB", "EiB"
};
float value = (float) size;
int i = 0;
for (i = 0; i < 6 && value >= 1024.f; ++i)
value /= 1024.f;
char buf[32];
snprintf(buf, 32, "%.5g %s", value, orders[i]);
return buf;
}
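// For example, mem_string(1536) returns "1.5 KiB" and mem_string(3 * 1024 * 1024) returns "3 MiB".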
std::string time_string(size_t value_) {
struct Order { float factor; const char* suffix; };
const Order orders[] = { { 0, "us" }, { 1000, "ms" },
{ 1000, "s" }, { 60, "m" },
{ 60, "h" }, { 24, "d" },
{ 7, "w" }, { (float) 52.1429, "y" } };
int i = 0;
float value = (float) value_;
for (i = 0; i < 7 && value > orders[i+1].factor; ++i)
value /= orders[i+1].factor;
char buf[32];
snprintf(buf, 32, "%.5g %s", value, orders[i].suffix);
return buf;
}
ENOKI_EXPORT void* cuda_malloc_zero(size_t size) {
void *result = cuda_malloc(size);
cuda_check(hipMemsetAsync(result, 0, size));
return result;
}
template <typename T> __global__ void fill(size_t n, T value, T *out) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
i += blockDim.x * gridDim.x)
out[i] = value;
}
template <typename T> __global__ void set_value(T *ptr, size_t idx, T value) {
ptr[idx] = value;
}
ENOKI_EXPORT void* cuda_malloc_fill(size_t size, uint8_t value) {
uint8_t *result = (uint8_t *) cuda_malloc(size);
cuda_check(hipMemsetAsync(result, value, size));
return result;
}
ENOKI_EXPORT void* cuda_malloc_fill(size_t size, uint16_t value) {
uint16_t *result = (uint16_t *) cuda_malloc(size * sizeof(uint16_t));
hipLaunchKernelGGL(( fill), dim3(256), dim3(256), 0, 0, size, value, result);
return result;
}
ENOKI_EXPORT void* cuda_malloc_fill(size_t size, uint32_t value) {
uint32_t *result = (uint32_t *) cuda_malloc(size * sizeof(uint32_t));
hipLaunchKernelGGL(( fill), dim3(256), dim3(256), 0, 0, size, value, result);
return result;
}
ENOKI_EXPORT void* cuda_malloc_fill(size_t size, uint64_t value) {
uint64_t *result = (uint64_t *) cuda_malloc(size * sizeof(uint64_t));
hipLaunchKernelGGL(( fill), dim3(256), dim3(256), 0, 0, size, value, result);
return result;
}
ENOKI_EXPORT void cuda_memcpy_to_device(void *dst, const void *src, size_t size) {
cuda_check(hipMemcpy(dst, src, size, hipMemcpyHostToDevice));
}
ENOKI_EXPORT void cuda_memcpy_from_device(void *dst, const void *src, size_t size) {
cuda_check(hipMemcpy(dst, src, size, hipMemcpyDeviceToHost));
}
ENOKI_EXPORT void cuda_memcpy_to_device_async(void *dst, const void *src, size_t size) {
cuda_check(hipMemcpyAsync(dst, src, size, hipMemcpyHostToDevice));
}
ENOKI_EXPORT void cuda_memcpy_from_device_async(void *dst, const void *src, size_t size) {
cuda_check(hipMemcpyAsync(dst, src, size, hipMemcpyDeviceToHost));
}
ENOKI_EXPORT void cuda_mem_get_info(size_t *free, size_t *total) {
cuda_check(hipMemGetInfo(free, total));
}
struct CUDAErrorList {
hipError_t id;
const char *value;
};
static CUDAErrorList __cuda_error_list[] = {
{ hipSuccess,
"hipSuccess"},
{ hipErrorInvalidValue,
"hipErrorInvalidValue"},
{ hipErrorMemoryAllocation,
"hipErrorMemoryAllocation"},
{ hipErrorNotInitialized,
"hipErrorNotInitialized"},
{ hipErrorDeinitialized,
"hipErrorDeinitialized"},
{ hipErrorProfilerDisabled,
"hipErrorProfilerDisabled"},
{ hipErrorProfilerNotInitialized,
"hipErrorProfilerNotInitialized"},
{ hipErrorProfilerAlreadyStarted,
"hipErrorProfilerAlreadyStarted"},
{ hipErrorProfilerAlreadyStopped,
"hipErrorProfilerAlreadyStopped"},
{ hipErrorNoDevice,
"hipErrorNoDevice"},
{ hipErrorInvalidDevice,
"hipErrorInvalidDevice"},
{ hipErrorInvalidImage,
"hipErrorInvalidImage"},
{ hipErrorInvalidContext,
"hipErrorInvalidContext"},
{ hipErrorContextAlreadyCurrent,
"hipErrorContextAlreadyCurrent"},
{ hipErrorMapFailed,
"hipErrorMapFailed"},
{ hipErrorUnmapFailed,
"hipErrorUnmapFailed"},
{ hipErrorArrayIsMapped,
"hipErrorArrayIsMapped"},
{ hipErrorAlreadyMapped,
"hipErrorAlreadyMapped"},
{ hipErrorNoBinaryForGpu,
"hipErrorNoBinaryForGpu"},
{ hipErrorAlreadyAcquired,
"hipErrorAlreadyAcquired"},
{ hipErrorNotMapped,
"hipErrorNotMapped"},
{ hipErrorNotMappedAsArray,
"hipErrorNotMappedAsArray"},
{ hipErrorNotMappedAsPointer,
"hipErrorNotMappedAsPointer"},
{ hipErrorECCNotCorrectable,
"hipErrorECCNotCorrectable"},
{ hipErrorUnsupportedLimit,
"hipErrorUnsupportedLimit"},
{ hipErrorContextAlreadyInUse,
"hipErrorContextAlreadyInUse"},
{ hipErrorPeerAccessUnsupported,
"hipErrorPeerAccessUnsupported"},
{ hipErrorInvalidKernelFile,
"hipErrorInvalidKernelFile"},
{ hipErrorInvalidGraphicsContext,
"hipErrorInvalidGraphicsContext"},
{ hipErrorNvlinkUncorrectable,
"hipErrorNvlinkUncorrectable"},
{ CUDA_ERROR_JIT_COMPILER_NOT_FOUND,
"CUDA_ERROR_JIT_COMPILER_NOT_FOUND"},
{ hipErrorInvalidSource,
"hipErrorInvalidSource"},
{ hipErrorFileNotFound,
"hipErrorFileNotFound"},
{ hipErrorSharedObjectSymbolNotFound,
"hipErrorSharedObjectSymbolNotFound"},
{ hipErrorSharedObjectInitFailed,
"hipErrorSharedObjectInitFailed"},
{ hipErrorOperatingSystem,
"hipErrorOperatingSystem"},
{ hipErrorInvalidResourceHandle,
"hipErrorInvalidResourceHandle"},
{ hipErrorNotFound,
"hipErrorNotFound"},
{ hipErrorNotReady,
"hipErrorNotReady"},
{ hipErrorIllegalAddress,
"hipErrorIllegalAddress"},
{ hipErrorLaunchOutOfResources,
"hipErrorLaunchOutOfResources"},
{ hipErrorLaunchTimeOut,
"hipErrorLaunchTimeOut"},
{ hipErrorLaunchIncompatibleTexturing,
"hipErrorLaunchIncompatibleTexturing"},
{ hipErrorPeerAccessAlreadyEnabled,
"hipErrorPeerAccessAlreadyEnabled"},
{ hipErrorPeerAccessNotEnabled,
"hipErrorPeerAccessNotEnabled"},
{ hipErrorPrimaryContextActive,
"hipErrorPrimaryContextActive"},
{ hipErrorContextIsDestroyed,
"hipErrorContextIsDestroyed"},
{ hipErrorAssert,
"hipErrorAssert"},
{ hipErrorTooManyPeers,
"hipErrorTooManyPeers"},
{ hipErrorHostMemoryAlreadyRegistered,
"hipErrorHostMemoryAlreadyRegistered"},
{ hipErrorHostMemoryNotRegistered,
"hipErrorHostMemoryNotRegistered"},
{ hipErrorHardwareStackError,
"hipErrorHardwareStackError"},
{ hipErrorIllegalInstruction,
"hipErrorIllegalInstruction"},
{ hipErrorMisalignedAddress,
"hipErrorMisalignedAddress"},
{ hipErrorInvalidAddressSpace,
"hipErrorInvalidAddressSpace"},
{ hipErrorInvalidPc,
"hipErrorInvalidPc"},
{ hipErrorLaunchFailure,
"hipErrorLaunchFailure"},
{ CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE,
"CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE"},
{ hipErrorNotPermitted,
"hipErrorNotPermitted"},
{ hipErrorNotSupported,
"hipErrorNotSupported"},
{ hipErrorUnknown,
"hipErrorUnknown"},
{ (hipError_t) -1, nullptr }
};
ENOKI_EXPORT const char *cuda_error_string(hipError_t id) {
int index = 0;
while (__cuda_error_list[index].id != id &&
__cuda_error_list[index].id != (hipError_t) -1)
index++;
if (__cuda_error_list[index].id == id)
return __cuda_error_list[index].value;
else
return "Invalid CUDA error status!";
}
ENOKI_EXPORT void cuda_check_impl(hipError_t errval, const char *file, const int line) {
if (errval != hipSuccess && errval != hipErrorDeinitialized) {
const char *err_msg = cuda_error_string(errval);
fprintf(stderr,
"cuda_check(): driver API error = %04d \"%s\" in "
"%s:%i.\n", (int) errval, err_msg, file, line);
exit(EXIT_FAILURE);
}
}
// Note: in the CUDA original this second overload takes cudaError_t (runtime API). Under HIP both
// the driver- and runtime-API checks use hipError_t, so compiling this overload alongside the one
// above would be a redefinition; it is kept here for reference only.
#if 0
ENOKI_EXPORT void cuda_check_impl(hipError_t errval, const char *file, const int line) {
    if (errval != hipSuccess && errval != hipErrorDeinitialized) {
        const char *err_msg = hipGetErrorName(errval);
        fprintf(stderr,
                "cuda_check(): runtime API error = %04d \"%s\" in "
                "%s:%i.\n", (int) errval, err_msg, file, line);
        exit(EXIT_FAILURE);
    }
}
#endif
NAMESPACE_END(enoki)
|
c4adbdd1214dd4023cc7765fa749d76854edb829.cu
|
/*
src/cuda/common.cpp -- CUDA backend (wrapper routines)
Enoki is a C++ template library that enables transparent vectorization
of numerical kernels using SIMD instruction sets available on current
processor architectures.
    Copyright (c) 2019 Wenzel Jakob <[email protected]>
All rights reserved. Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
*/
#include <cuda.h>
#include <stdio.h>
#include <iostream>
#include "common.cuh"
NAMESPACE_BEGIN(enoki)
std::string mem_string(size_t size) {
const char *orders[] = {
"B", "KiB", "MiB", "GiB",
"TiB", "PiB", "EiB"
};
float value = (float) size;
int i = 0;
for (i = 0; i < 6 && value >= 1024.f; ++i)
value /= 1024.f;
char buf[32];
snprintf(buf, 32, "%.5g %s", value, orders[i]);
return buf;
}
std::string time_string(size_t value_) {
struct Order { float factor; const char* suffix; };
const Order orders[] = { { 0, "us" }, { 1000, "ms" },
{ 1000, "s" }, { 60, "m" },
{ 60, "h" }, { 24, "d" },
{ 7, "w" }, { (float) 52.1429, "y" } };
int i = 0;
float value = (float) value_;
for (i = 0; i < 7 && value > orders[i+1].factor; ++i)
value /= orders[i+1].factor;
char buf[32];
snprintf(buf, 32, "%.5g %s", value, orders[i].suffix);
return buf;
}
ENOKI_EXPORT void* cuda_malloc_zero(size_t size) {
void *result = cuda_malloc(size);
cuda_check(cudaMemsetAsync(result, 0, size));
return result;
}
template <typename T> __global__ void fill(size_t n, T value, T *out) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
i += blockDim.x * gridDim.x)
out[i] = value;
}
template <typename T> __global__ void set_value(T *ptr, size_t idx, T value) {
ptr[idx] = value;
}
ENOKI_EXPORT void* cuda_malloc_fill(size_t size, uint8_t value) {
uint8_t *result = (uint8_t *) cuda_malloc(size);
cuda_check(cudaMemsetAsync(result, value, size));
return result;
}
ENOKI_EXPORT void* cuda_malloc_fill(size_t size, uint16_t value) {
uint16_t *result = (uint16_t *) cuda_malloc(size * sizeof(uint16_t));
fill<<<256, 256>>>(size, value, result);
return result;
}
ENOKI_EXPORT void* cuda_malloc_fill(size_t size, uint32_t value) {
uint32_t *result = (uint32_t *) cuda_malloc(size * sizeof(uint32_t));
fill<<<256, 256>>>(size, value, result);
return result;
}
ENOKI_EXPORT void* cuda_malloc_fill(size_t size, uint64_t value) {
uint64_t *result = (uint64_t *) cuda_malloc(size * sizeof(uint64_t));
fill<<<256, 256>>>(size, value, result);
return result;
}
ENOKI_EXPORT void cuda_memcpy_to_device(void *dst, const void *src, size_t size) {
cuda_check(cudaMemcpy(dst, src, size, cudaMemcpyHostToDevice));
}
ENOKI_EXPORT void cuda_memcpy_from_device(void *dst, const void *src, size_t size) {
cuda_check(cudaMemcpy(dst, src, size, cudaMemcpyDeviceToHost));
}
ENOKI_EXPORT void cuda_memcpy_to_device_async(void *dst, const void *src, size_t size) {
cuda_check(cudaMemcpyAsync(dst, src, size, cudaMemcpyHostToDevice));
}
ENOKI_EXPORT void cuda_memcpy_from_device_async(void *dst, const void *src, size_t size) {
cuda_check(cudaMemcpyAsync(dst, src, size, cudaMemcpyDeviceToHost));
}
ENOKI_EXPORT void cuda_mem_get_info(size_t *free, size_t *total) {
cuda_check(cudaMemGetInfo(free, total));
}
struct CUDAErrorList {
CUresult id;
const char *value;
};
static CUDAErrorList __cuda_error_list[] = {
{ CUDA_SUCCESS,
"CUDA_SUCCESS"},
{ CUDA_ERROR_INVALID_VALUE,
"CUDA_ERROR_INVALID_VALUE"},
{ CUDA_ERROR_OUT_OF_MEMORY,
"CUDA_ERROR_OUT_OF_MEMORY"},
{ CUDA_ERROR_NOT_INITIALIZED,
"CUDA_ERROR_NOT_INITIALIZED"},
{ CUDA_ERROR_DEINITIALIZED,
"CUDA_ERROR_DEINITIALIZED"},
{ CUDA_ERROR_PROFILER_DISABLED,
"CUDA_ERROR_PROFILER_DISABLED"},
{ CUDA_ERROR_PROFILER_NOT_INITIALIZED,
"CUDA_ERROR_PROFILER_NOT_INITIALIZED"},
{ CUDA_ERROR_PROFILER_ALREADY_STARTED,
"CUDA_ERROR_PROFILER_ALREADY_STARTED"},
{ CUDA_ERROR_PROFILER_ALREADY_STOPPED,
"CUDA_ERROR_PROFILER_ALREADY_STOPPED"},
{ CUDA_ERROR_NO_DEVICE,
"CUDA_ERROR_NO_DEVICE"},
{ CUDA_ERROR_INVALID_DEVICE,
"CUDA_ERROR_INVALID_DEVICE"},
{ CUDA_ERROR_INVALID_IMAGE,
"CUDA_ERROR_INVALID_IMAGE"},
{ CUDA_ERROR_INVALID_CONTEXT,
"CUDA_ERROR_INVALID_CONTEXT"},
{ CUDA_ERROR_CONTEXT_ALREADY_CURRENT,
"CUDA_ERROR_CONTEXT_ALREADY_CURRENT"},
{ CUDA_ERROR_MAP_FAILED,
"CUDA_ERROR_MAP_FAILED"},
{ CUDA_ERROR_UNMAP_FAILED,
"CUDA_ERROR_UNMAP_FAILED"},
{ CUDA_ERROR_ARRAY_IS_MAPPED,
"CUDA_ERROR_ARRAY_IS_MAPPED"},
{ CUDA_ERROR_ALREADY_MAPPED,
"CUDA_ERROR_ALREADY_MAPPED"},
{ CUDA_ERROR_NO_BINARY_FOR_GPU,
"CUDA_ERROR_NO_BINARY_FOR_GPU"},
{ CUDA_ERROR_ALREADY_ACQUIRED,
"CUDA_ERROR_ALREADY_ACQUIRED"},
{ CUDA_ERROR_NOT_MAPPED,
"CUDA_ERROR_NOT_MAPPED"},
{ CUDA_ERROR_NOT_MAPPED_AS_ARRAY,
"CUDA_ERROR_NOT_MAPPED_AS_ARRAY"},
{ CUDA_ERROR_NOT_MAPPED_AS_POINTER,
"CUDA_ERROR_NOT_MAPPED_AS_POINTER"},
{ CUDA_ERROR_ECC_UNCORRECTABLE,
"CUDA_ERROR_ECC_UNCORRECTABLE"},
{ CUDA_ERROR_UNSUPPORTED_LIMIT,
"CUDA_ERROR_UNSUPPORTED_LIMIT"},
{ CUDA_ERROR_CONTEXT_ALREADY_IN_USE,
"CUDA_ERROR_CONTEXT_ALREADY_IN_USE"},
{ CUDA_ERROR_PEER_ACCESS_UNSUPPORTED,
"CUDA_ERROR_PEER_ACCESS_UNSUPPORTED"},
{ CUDA_ERROR_INVALID_PTX,
"CUDA_ERROR_INVALID_PTX"},
{ CUDA_ERROR_INVALID_GRAPHICS_CONTEXT,
"CUDA_ERROR_INVALID_GRAPHICS_CONTEXT"},
{ CUDA_ERROR_NVLINK_UNCORRECTABLE,
"CUDA_ERROR_NVLINK_UNCORRECTABLE"},
{ CUDA_ERROR_JIT_COMPILER_NOT_FOUND,
"CUDA_ERROR_JIT_COMPILER_NOT_FOUND"},
{ CUDA_ERROR_INVALID_SOURCE,
"CUDA_ERROR_INVALID_SOURCE"},
{ CUDA_ERROR_FILE_NOT_FOUND,
"CUDA_ERROR_FILE_NOT_FOUND"},
{ CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND,
"CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND"},
{ CUDA_ERROR_SHARED_OBJECT_INIT_FAILED,
"CUDA_ERROR_SHARED_OBJECT_INIT_FAILED"},
{ CUDA_ERROR_OPERATING_SYSTEM,
"CUDA_ERROR_OPERATING_SYSTEM"},
{ CUDA_ERROR_INVALID_HANDLE,
"CUDA_ERROR_INVALID_HANDLE"},
{ CUDA_ERROR_NOT_FOUND,
"CUDA_ERROR_NOT_FOUND"},
{ CUDA_ERROR_NOT_READY,
"CUDA_ERROR_NOT_READY"},
{ CUDA_ERROR_ILLEGAL_ADDRESS,
"CUDA_ERROR_ILLEGAL_ADDRESS"},
{ CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES,
"CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES"},
{ CUDA_ERROR_LAUNCH_TIMEOUT,
"CUDA_ERROR_LAUNCH_TIMEOUT"},
{ CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING,
"CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING"},
{ CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED,
"CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED"},
{ CUDA_ERROR_PEER_ACCESS_NOT_ENABLED,
"CUDA_ERROR_PEER_ACCESS_NOT_ENABLED"},
{ CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE,
"CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE"},
{ CUDA_ERROR_CONTEXT_IS_DESTROYED,
"CUDA_ERROR_CONTEXT_IS_DESTROYED"},
{ CUDA_ERROR_ASSERT,
"CUDA_ERROR_ASSERT"},
{ CUDA_ERROR_TOO_MANY_PEERS,
"CUDA_ERROR_TOO_MANY_PEERS"},
{ CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED,
"CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED"},
{ CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED,
"CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED"},
{ CUDA_ERROR_HARDWARE_STACK_ERROR,
"CUDA_ERROR_HARDWARE_STACK_ERROR"},
{ CUDA_ERROR_ILLEGAL_INSTRUCTION,
"CUDA_ERROR_ILLEGAL_INSTRUCTION"},
{ CUDA_ERROR_MISALIGNED_ADDRESS,
"CUDA_ERROR_MISALIGNED_ADDRESS"},
{ CUDA_ERROR_INVALID_ADDRESS_SPACE,
"CUDA_ERROR_INVALID_ADDRESS_SPACE"},
{ CUDA_ERROR_INVALID_PC,
"CUDA_ERROR_INVALID_PC"},
{ CUDA_ERROR_LAUNCH_FAILED,
"CUDA_ERROR_LAUNCH_FAILED"},
{ CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE,
"CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE"},
{ CUDA_ERROR_NOT_PERMITTED,
"CUDA_ERROR_NOT_PERMITTED"},
{ CUDA_ERROR_NOT_SUPPORTED,
"CUDA_ERROR_NOT_SUPPORTED"},
{ CUDA_ERROR_UNKNOWN,
"CUDA_ERROR_UNKNOWN"},
{ (CUresult) -1, nullptr }
};
ENOKI_EXPORT const char *cuda_error_string(CUresult id) {
int index = 0;
while (__cuda_error_list[index].id != id &&
__cuda_error_list[index].id != (CUresult) -1)
index++;
if (__cuda_error_list[index].id == id)
return __cuda_error_list[index].value;
else
return "Invalid CUDA error status!";
}
ENOKI_EXPORT void cuda_check_impl(CUresult errval, const char *file, const int line) {
if (errval != CUDA_SUCCESS && errval != CUDA_ERROR_DEINITIALIZED) {
const char *err_msg = cuda_error_string(errval);
fprintf(stderr,
"cuda_check(): driver API error = %04d \"%s\" in "
"%s:%i.\n", (int) errval, err_msg, file, line);
exit(EXIT_FAILURE);
}
}
ENOKI_EXPORT void cuda_check_impl(cudaError_t errval, const char *file, const int line) {
if (errval != cudaSuccess && errval != cudaErrorCudartUnloading) {
const char *err_msg = cudaGetErrorName(errval);
fprintf(stderr,
"cuda_check(): runtime API error = %04d \"%s\" in "
"%s:%i.\n", (int) errval, err_msg, file, line);
exit(EXIT_FAILURE);
}
}
NAMESPACE_END(enoki)
|
eef17b423afbc95c6838287b37118db1c02896f8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#define BLOCKSIZE 256
__global__ void kern_set_val (float *gpu_ptr, float value, int nb) {
  int i;
  i=blockDim.x * blockIdx.x+threadIdx.x;
  if (i < nb)   // guard the tail: nb may not be a multiple of the block size
    gpu_ptr[i] = value;
}
extern "C" void set_value (float *ptr, float value, int nb) {
float *gpu_ptr;
hipMalloc (&gpu_ptr, sizeof(float)*nb);
//UP TO YOU : write kernel invocation here
  hipLaunchKernelGGL(( kern_set_val) , dim3((nb+BLOCKSIZE-1)/BLOCKSIZE),dim3(BLOCKSIZE), 0, 0, gpu_ptr,value,nb);
hipDeviceSynchronize ();
hipMemcpy(ptr,gpu_ptr,nb*sizeof(float),hipMemcpyDeviceToHost);
hipFree (gpu_ptr);
}
|
eef17b423afbc95c6838287b37118db1c02896f8.cu
|
#include <cuda.h>
#include <stdio.h>
#define BLOCKSIZE 256
__global__ void kern_set_val (float *gpu_ptr, float value, int nb) {
  int i;
  i=blockDim.x * blockIdx.x+threadIdx.x;
  if (i < nb)   // guard the tail: nb may not be a multiple of the block size
    gpu_ptr[i] = value;
}
extern "C" void set_value (float *ptr, float value, int nb) {
float *gpu_ptr;
cudaMalloc (&gpu_ptr, sizeof(float)*nb);
//UP TO YOU : write kernel invocation here
  kern_set_val <<<(nb+BLOCKSIZE-1)/BLOCKSIZE,BLOCKSIZE>>>(gpu_ptr,value,nb);
cudaThreadSynchronize ();
cudaMemcpy(ptr,gpu_ptr,nb*sizeof(float),cudaMemcpyDeviceToHost);
cudaFree (gpu_ptr);
}
|
0eabc0531fc9ee351eebbac3669793520f98fb81.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/transpose_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void transpose_gpu(const int nthreads, const Dtype* from_data, Dtype* to_data,
const int* from_counts, const int* to_counts, const int* map, const int num_axes, int* buf) {
CUDA_KERNEL_LOOP(index, nthreads) {
int* from_inds=buf + index * num_axes;
int from_index = index, to_index = 0;
for(int i = 0; i < num_axes; i++) {
from_inds[i] = from_index / from_counts[i];
from_index = from_index % from_counts[i];
}
for(int i = 0; i < num_axes; i++) {
to_index += from_inds[map[i]] * to_counts[i];
}
*(to_data + to_index) = *(from_data + index);
}
}
template <typename Dtype>
void TransposeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int nthreads = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( transpose_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
nthreads, bottom[0]->gpu_data(), top[0]->mutable_gpu_data(),
bottom_counts_.gpu_data(), top_counts_.gpu_data(), forward_map_.gpu_data(),
bottom[0]->shape().size(), buf_.mutable_gpu_data());
}
template <typename Dtype>
void TransposeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const int nthreads = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( transpose_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
nthreads, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff(),
top_counts_.gpu_data(), bottom_counts_.gpu_data(), backward_map_.gpu_data(),
bottom[0]->shape().size(), buf_.mutable_gpu_data());
}
INSTANTIATE_LAYER_GPU_FUNCS(TransposeLayer);
} // namespace caffe
|
0eabc0531fc9ee351eebbac3669793520f98fb81.cu
|
#include <vector>
#include "caffe/layers/transpose_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void transpose_gpu(const int nthreads, const Dtype* from_data, Dtype* to_data,
const int* from_counts, const int* to_counts, const int* map, const int num_axes, int* buf) {
CUDA_KERNEL_LOOP(index, nthreads) {
int* from_inds=buf + index * num_axes;
int from_index = index, to_index = 0;
for(int i = 0; i < num_axes; i++) {
from_inds[i] = from_index / from_counts[i];
from_index = from_index % from_counts[i];
}
for(int i = 0; i < num_axes; i++) {
to_index += from_inds[map[i]] * to_counts[i];
}
*(to_data + to_index) = *(from_data + index);
}
}
template <typename Dtype>
void TransposeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int nthreads = bottom[0]->count();
transpose_gpu<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, bottom[0]->gpu_data(), top[0]->mutable_gpu_data(),
bottom_counts_.gpu_data(), top_counts_.gpu_data(), forward_map_.gpu_data(),
bottom[0]->shape().size(), buf_.mutable_gpu_data());
}
template <typename Dtype>
void TransposeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const int nthreads = bottom[0]->count();
transpose_gpu<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff(),
top_counts_.gpu_data(), bottom_counts_.gpu_data(), backward_map_.gpu_data(),
bottom[0]->shape().size(), buf_.mutable_gpu_data());
}
INSTANTIATE_LAYER_GPU_FUNCS(TransposeLayer);
} // namespace caffe
|
d3cdda3bce17d2774e6e73ec1e1989031e047dff.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
struct MyStruct {
float floatvalue;
int intvalue;
};
// __device__ __host__ float sumStruct(struct MyStruct **p_structs, int N) {
// float sum = 0;
// for(int i = 0; i < N; i++) {
// struct MyStruct *mystruct = p_structs[i];
// sum += mystruct->floatvalue + float(mystruct->intvalue) * 3.5f;
// }
// return sum;
// }
// __global__ void mykernel(float *data, MyStruct *structs, int N) {
// data[0] = sumStruct(&structs, N);
// data[3] = sumStruct(&structs, 123);
// data[4] = sumStruct(&structs, 12300);
// }
|
d3cdda3bce17d2774e6e73ec1e1989031e047dff.cu
|
struct MyStruct {
float floatvalue;
int intvalue;
};
// __device__ __host__ float sumStruct(struct MyStruct **p_structs, int N) {
// float sum = 0;
// for(int i = 0; i < N; i++) {
// struct MyStruct *mystruct = p_structs[i];
// sum += mystruct->floatvalue + float(mystruct->intvalue) * 3.5f;
// }
// return sum;
// }
// __global__ void mykernel(float *data, MyStruct *structs, int N) {
// data[0] = sumStruct(&structs, N);
// data[3] = sumStruct(&structs, 123);
// data[4] = sumStruct(&structs, 12300);
// }
|
b3c5b2c48e4fa5faf8e23cf24d299321972b590e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "query_ball_point_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int b = 2;
int n = XSIZE*YSIZE;
int m = 2;
const float *radius = NULL;
hipMalloc(&radius, XSIZE*YSIZE);
int nsample = 1;
const float *xyz1 = NULL;
hipMalloc(&xyz1, XSIZE*YSIZE);
const float *xyz2 = NULL;
hipMalloc(&xyz2, XSIZE*YSIZE);
int *idx = NULL;
hipMalloc(&idx, XSIZE*YSIZE);
int *pts_cnt = NULL;
hipMalloc(&pts_cnt, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
query_ball_point_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
query_ball_point_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
query_ball_point_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
b3c5b2c48e4fa5faf8e23cf24d299321972b590e.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "query_ball_point_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int b = 2;
int n = XSIZE*YSIZE;
int m = 2;
const float *radius = NULL;
cudaMalloc(&radius, XSIZE*YSIZE);
int nsample = 1;
const float *xyz1 = NULL;
cudaMalloc(&xyz1, XSIZE*YSIZE);
const float *xyz2 = NULL;
cudaMalloc(&xyz2, XSIZE*YSIZE);
int *idx = NULL;
cudaMalloc(&idx, XSIZE*YSIZE);
int *pts_cnt = NULL;
cudaMalloc(&pts_cnt, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
query_ball_point_gpu<<<gridBlock,threadBlock>>>(b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
query_ball_point_gpu<<<gridBlock,threadBlock>>>(b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
query_ball_point_gpu<<<gridBlock,threadBlock>>>(b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
4cb3458485a6729c93297d8ea8b8fae8ce41d6a7.hip
|
// !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to run matrix multiplication kernels using functions and data structures
provided by CUTLASS using tensor cores, which we run on an NVIDIA Turing GPU.
Writing a single high performance matrix multiplication kernel is hard but do-able. Whereas writing
high performance kernels at scale which works for multiple problem sizes with good abstractions is
really hard. CUTLASS solves this problem by providing simplified abstractions to compose
multiple sections of gemm kernel. When used properly, the kernels can hit peak performance of GPU
easily.
CUTLASS divides a kernel into hierarchical composable sections. Which means, at each thread, warp
and thread-block level, they compute on their own tile-size with higher level of tile sizes being
composed from lower level ones. Multiple thread-tiles (tile size each thread computes) can be used
to form warp-tiles (tile size each warp computes) and multiple warp tiles can be used to compute
threadblock-tile (tile size computed by a threadblock).
In this example, we split variable initialization into
1. Setting up data properties : describes how matrices are laid out in the memory and how the kernel
can view them (logical to physical mapping)
2. Setting up computation properties : describes how the above set matrices will be used to compute
output of matrix multiplication.
First, we setup the data types of matrices A, B, C and D along with alpha, beta as the equation for
GEMM is D = alpha * A * B + beta * C. In CUTLASS, the kernels first compute A * B and leaves the
rest of the computation to end of the kernel as alpha * X + beta * C is a simple element-wise
operation on X (A * B) and C. We call this as epilogue of kernel. Hence, we setup data types for
alpha and beta to be equal to ElementComputeEpilogue = int32_t. As we want to use MMA instructions
on Turing and they support 8-bit signed integer (int8_t), we use data type for elements in input
matrices A and B as int8_t. Turing also supports accumulation of partial dot products into int32_t, which
can store a wider range of numbers, so we use it as the data type of output matrix elements and accumulation.
We convey this to CUTLASS kernel by initializing template variables ElementAccumulator (int32_t),
ElementComputeEpilogue (int32_t), ElementInputA (int8_t), ElementInputB (int8_t), ElementOutput
(int32_t). Communicating just the data type is not enough. As the data is laid out linearly in
memory, we have to convey the layout of matrices. We do that by initializing template variable
LayoutInputA to the row major cutlass variable, LayoutInputB to column major and LayoutOutput to row
major. Next, we set up rules to compute alpha * X + beta * C, which is called the epilogue of the kernel.
We initialize template variable EpilogueOp, which takes the data type of output ElementOutput
(int32_t), the number of elements per vector memory access (4 for int32_t output), data type of accumulator (int32_t)
and data type of computation of linear combination (alpha * X + beta * C).
Now that we setup the properties of data, we have to setup properties of computation.
Second, we create template variables of tile sizes for thread-block, warp and mma-op to 128x256x64,
64x64x64, 8x8x16 (MxNxK) respectively. When passed to instantiate the CUTLASS GEMM kernel, it internally
deduces the number of threads needed per thread-block, the amount of shared memory, how to store data in
a bank-conflict free manner, and a ton of other variables required to compose, initialize and launch a
high performance GEMM kernel. This is the beauty of CUTLASS, it relieves developer from
understanding and coding complicated hardware optimizations which can easily go wrong.
CUTLASS also supports multiple MMA pipelines in a threadblock. What are MMA pipelines? MMA pipelines
constitute the whole process of loading input data from global memory to shared memory, loading data
from shared memory to registers, doing matrix multiplication, store to global memory. The below flow
sequence shows a typical mma pipeline.
matrix in global memory -> registers -> tile in shared memory -> registers -> mma -> registers ->
output to global memory
The problem with single pipeline is, each stage is synchronous which means, each stage has to wait
until the previous finished executing. There are stages in the pipeline which do not have fixed
latency, for example, the loads from global memory and shared memory. Therefore, we can add one more
pipeline with a phase shift in mma kernel to hide latency from global and shared memory loads.
Finally, the pipeline in a kernel looks like
(1) matrix in global memory -> (2) registers -> (3) tile in shared memory -> (4) registers -> (5)
mma -> (6) registers -> (7) output to global memory (1) <null> -> (2) <null> -> (3) matrix in global
memory -> (4) registers -> (5) tile in shared memory -> (6) registers -> (7) mma -> (8) registers ->
(9) output to global memory
This way, you can hide the second global memory load latency by doing computation on already loaded
input data.
There are few more template variables initialized such as, which threadblock tile of output matrix
is done which threadblock launched on an SM, CUDA SM architecture of GPU you want to run on.
These are all put together to create a template variable which describes CUTLASS GEMM kernel using
cutlass::gemm::device::Gemm template.
The next step is to initialize physical data, instantiate and initialize the CUTLASS kernel and run it.
We use CUTLASS utilities to initialize, fill, and compare matrices as they are simple and don't come
in the way of learning CUTLASS.
Once all the matrices are initialized and filled with data, create arguments tuple to launch CUTLASS
kernel which takes problem size (M = 5120, N = 4096 and K = 4096), matrices, alpha, beta and the
important one, split k-dimension factor. Along with that, we query CUTLASS if any scratch-space
memory required by the kernel we instantiated. If yes, we create it and pass it along with other
arguments created to initialize the CUTLASS kernel; then the kernel is launched.
In this example, we later on launch a reference gemm kernel (from CUTLASS utilities) to compare if
the output from CUTLASS kernel is same as reference GEMM kernel.
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = int32_t; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = int8_t; // <- data type of elements in input matrix A
using ElementInputB = int8_t; // <- data type of elements in input matrix B
using ElementOutput = int32_t; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices. Row Major for
// Matrix A, Column Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::RowMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm75;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 256, 64>; // <- threadblock tile M = 128, N = 256, K = 64
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 64>; // <- warp tile M = 64, N = 64, K = 64
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<8, 8, 16>; // <- MMA Op tile M = 8, N = 8, K = 16
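// Added sanity-check sketch (editorial addition, not part of the original
// example): the warp count per threadblock follows from dividing the
// threadblock tile by the warp tile, here (128/64) * (256/64) = 8 warps,
// i.e. 256 threads per threadblock.
static_assert(ShapeMMAThreadBlock::kM % ShapeMMAWarp::kM == 0 &&
              ShapeMMAThreadBlock::kN % ShapeMMAWarp::kN == 0,
              "warp tile must evenly divide the threadblock tile");
static_assert((ShapeMMAThreadBlock::kM / ShapeMMAWarp::kM) *
              (ShapeMMAThreadBlock::kN / ShapeMMAWarp::kN) == 8,
              "with the shapes above: 8 warps (256 threads) per threadblock");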
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;  // <- default threadblock-to-output-tile mapping
// This code section describes the epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized
// memory access. For a byte, it's 16
// elements. This becomes the vector width of
// math instructions in the epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function
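// Added sanity-check sketch (editorial addition): with ElementOutput = int32_t
// the vectorized epilogue access above is 128 / 32 = 4 elements per access;
// the "16 elements" figure applies only to 8-bit output types.
static_assert(128 / cutlass::sizeof_bits<ElementOutput>::value == 4,
              "epilogue vector width for int32_t output");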
// Number of pipelines you want to use
constexpr int NumStages = 2;
using Gemm = cutlass::gemm::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages>;
int run() {
// Turing Tensor Core operations exposed with mma.sync and ldmatrix are first available
// in CUDA 10.2.
//
// CUTLASS must be compiled with CUDA 10.2 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) {
std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl;
return -1;
}
hipDeviceProp_t props;
hipError_t error = hipGetDeviceProperties(&props, 0);
if (error != hipSuccess) {
std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 75)) {
std::cerr << "Turing Tensor Core operations must be run on a machine with compute capability at least 75."
<< std::endl;
return -1;
}
const int length_m = 5120;
const int length_n = 4096;
const int length_k = 4096;
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k);
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// reference kernel
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(4),
ElementInputA(-4),
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(4),
ElementInputB(-4),
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(4),
ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha and beta for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
ElementComputeEpilogue beta = ElementComputeEpilogue(0);
// Split K dimension into 1 partition
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication
tensor_a.device_ref(), // <- reference to matrix A on device
tensor_b.device_ref(), // <- reference to matrix B on device
tensor_c.device_ref(), // <- reference to matrix C on device
tensor_d.device_ref(), // <- reference to matrix D on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Initialize CUTLASS kernel with arguments and workspace pointer
cutlass::Status status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
// Create instantiation for device reference gemm kernel
cutlass::reference::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementComputeEpilogue>
gemm_device;
// Launch device reference gemm kernel
gemm_device(problem_size,
alpha,
tensor_a.device_ref(),
tensor_b.device_ref(),
beta,
tensor_c.device_ref(),
tensor_ref_d.device_ref());
// Wait for kernels to finish
hipDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
std::cout << (passed ? "Passed" : "Failed") << std::endl;
return (passed ? 0 : -1);
}
int main() {
// Turing Tensor Core operations exposed with mma.sync and ldmatrix are first available
// in CUDA 10.2.
//
// CUTLASS must be compiled with CUDA 10.2 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) {
std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl;
// Returning zero so this test passes when built on older Toolkits.
return 0;
}
else {
return run();
}
}
|
4cb3458485a6729c93297d8ea8b8fae8ce41d6a7.cu
|
/***************************************************************************************************
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to run matrix multiplication kernels using functions and data structures
provided by CUTLASS using tensor cores, which we run on an NVIDIA Turing GPU.
Writing a single high performance matrix multiplication kernel is hard but do-able. Whereas writing
high performance kernels at scale which works for multiple problem sizes with good abstractions is
really hard. CUTLASS solves this problem by providing simplified abstractions to compose
multiple sections of gemm kernel. When used properly, the kernels can hit peak performance of GPU
easily.
CUTLASS divides a kernel into hierarchical composable sections. Which means, at each thread, warp
and thread-block level, they compute on their own tile-size with higher level of tile sizes being
composed from lower level ones. Multiple thread-tiles (tile size each thread computes) can be used
to form warp-tiles (tile size each warp computes) and multiple warp tiles can be used to compute
threadblock-tile (tile size computed by a threadblock).
In this example, we split variable initialization into
1. Setting up data properties : describes how matrices are laid out in the memory and how the kernel
can view them (logical to physical mapping)
2. Setting up computation properties : describes how the above set matrices will be used to compute
output of matrix multiplication.
First, we setup the data types of matrices A, B, C and D along with alpha, beta as the equation for
GEMM is D = alpha * A * B + beta * C. In CUTLASS, the kernels first compute A * B and leaves the
rest of the computation to end of the kernel as alpha * X + beta * C is a simple element-wise
operation on X (A * B) and C. We call this as epilogue of kernel. Hence, we setup data types for
alpha and beta to be equal to ElementComputeEpilogue = int32_t. As we want to use MMA instructions
on Turing and they support 8-bit signed integer (int8_t), we use data type for elements in input
matrices A and B as int8_t. Turing also supports accumulation of partial dot products into int32_t, which
can store a wider range of numbers, so we use it as the data type of output matrix elements and accumulation.
We convey this to CUTLASS kernel by initializing template variables ElementAccumulator (int32_t),
ElementComputeEpilogue (int32_t), ElementInputA (int8_t), ElementInputB (int8_t), ElementOutput
(int32_t). Communicating just the data type is not enough. As the data is laid out linearly in
memory, we have to convey the layout of matrices. We do that by initializing template variable
LayoutInputA to the row major cutlass variable, LayoutInputB to column major and LayoutOutput to row
major. Next, we set up rules to compute alpha * X + beta * C, which is called the epilogue of the kernel.
We initialize template variable EpilogueOp, which takes the data type of output ElementOutput
(int32_t), the number of elements per vector memory access (4 for int32_t output), data type of accumulator (int32_t)
and data type of computation of linear combination (alpha * X + beta * C).
Now that we setup the properties of data, we have to setup properties of computation.
Second, we create template variables of tile sizes for thread-block, warp and mma-op to 128x256x64,
64x64x64, 8x8x16 (MxNxK) respectively. When passed to instantiate the CUTLASS GEMM kernel, it internally
deduces the number of threads needed per thread-block, the amount of shared memory, how to store data in
a bank-conflict free manner, and a ton of other variables required to compose, initialize and launch a
high performance GEMM kernel. This is the beauty of CUTLASS, it relieves developer from
understanding and coding complicated hardware optimizations which can easily go wrong.
CUTLASS also supports multiple MMA pipelines in a threadblock. What are MMA pipelines? MMA pipelines
constitute the whole process of loading input data from global memory to shared memory, loading data
from shared memory to registers, doing matrix multiplication, store to global memory. The below flow
sequence shows a typical mma pipeline.
matrix in global memory -> registers -> tile in shared memory -> registers -> mma -> registers ->
output to global memory
The problem with single pipeline is, each stage is synchronous which means, each stage has to wait
until the previous finished executing. There are stages in the pipeline which do not have fixed
latency, for example, the loads from global memory and shared memory. Therefore, we can add one more
pipeline with a phase shift in mma kernel to hide latency from global and shared memory loads.
Finally, the pipeline in a kernel looks like
(1) matrix in global memory -> (2) registers -> (3) tile in shared memory -> (4) registers -> (5)
mma -> (6) registers -> (7) output to global memory (1) <null> -> (2) <null> -> (3) matrix in global
memory -> (4) registers -> (5) tile in shared memory -> (6) registers -> (7) mma -> (8) registers ->
(9) output to global memory
This way, you can hide the second global memory load latency by doing computation on already loaded
input data.
There are few more template variables initialized such as, which threadblock tile of output matrix
is done which threadblock launched on an SM, CUDA SM architecture of GPU you want to run on.
These are all put together to create a template variable which describes CUTLASS GEMM kernel using
cutlass::gemm::device::Gemm template.
The next step is to initialize physical data, instantiate and initialize the CUTLASS kernel and run it.
We use CUTLASS utilities to initialize, fill, and compare matrices as they are simple and don't come
in the way of learning CUTLASS.
Once all the matrices are initialized and filled with data, create arguments tuple to launch CUTLASS
kernel which takes problem size (M = 5120, N = 4096 and K = 4096), matrices, alpha, beta and the
important one, split k-dimension factor. Along with that, we query CUTLASS if any scratch-space
memory required by the kernel we instantiated. If yes, we create it and pass it along with other
arguments created to initialize the CUTLASS kernel; then the kernel is launched.
In this example, we later on launch a reference gemm kernel (from CUTLASS utilities) to compare if
the output from CUTLASS kernel is same as reference GEMM kernel.
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = int32_t; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = int8_t; // <- data type of elements in input matrix A
using ElementInputB = int8_t; // <- data type of elements in input matrix B
using ElementOutput = int32_t; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices. Row Major for
// Matrix A, Column Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::RowMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm75;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 256, 64>; // <- threadblock tile M = 128, N = 256, K = 64
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 64>; // <- warp tile M = 64, N = 64, K = 64
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<8, 8, 16>; // <- MMA Op tile M = 8, N = 8, K = 16
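// Added sanity-check sketch (editorial addition, not part of the original
// example): the warp count per threadblock follows from dividing the
// threadblock tile by the warp tile, here (128/64) * (256/64) = 8 warps,
// i.e. 256 threads per threadblock.
static_assert(ShapeMMAThreadBlock::kM % ShapeMMAWarp::kM == 0 &&
              ShapeMMAThreadBlock::kN % ShapeMMAWarp::kN == 0,
              "warp tile must evenly divide the threadblock tile");
static_assert((ShapeMMAThreadBlock::kM / ShapeMMAWarp::kM) *
              (ShapeMMAThreadBlock::kN / ShapeMMAWarp::kN) == 8,
              "with the shapes above: 8 warps (256 threads) per threadblock");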
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;  // <- default threadblock-to-output-tile mapping
// This code section describes the epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized
// memory access. For a byte, it's 16
// elements. This becomes the vector width of
// math instructions in the epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function
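// Added sanity-check sketch (editorial addition): with ElementOutput = int32_t
// the vectorized epilogue access above is 128 / 32 = 4 elements per access;
// the "16 elements" figure applies only to 8-bit output types.
static_assert(128 / cutlass::sizeof_bits<ElementOutput>::value == 4,
              "epilogue vector width for int32_t output");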
// Number of pipelines you want to use
constexpr int NumStages = 2;
using Gemm = cutlass::gemm::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages>;
int run() {
// Turing Tensor Core operations exposed with mma.sync and ldmatrix are first available
// in CUDA 10.2.
//
// CUTLASS must be compiled with CUDA 10.2 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) {
std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl;
return -1;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 75)) {
std::cerr << "Turing Tensor Core operations must be run on a machine with compute capability at least 75."
<< std::endl;
return -1;
}
const int length_m = 5120;
const int length_n = 4096;
const int length_k = 4096;
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k);
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// reference kernel
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(4),
ElementInputA(-4),
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(4),
ElementInputB(-4),
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(4),
ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha and beta for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
ElementComputeEpilogue beta = ElementComputeEpilogue(0);
// Split K dimension into 1 partition
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication
tensor_a.device_ref(), // <- reference to matrix A on device
tensor_b.device_ref(), // <- reference to matrix B on device
tensor_c.device_ref(), // <- reference to matrix C on device
tensor_d.device_ref(), // <- reference to matrix D on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Initialize CUTLASS kernel with arguments and workspace pointer
cutlass::Status status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
// Create instantiation for device reference gemm kernel
cutlass::reference::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementComputeEpilogue>
gemm_device;
// Launch device reference gemm kernel
gemm_device(problem_size,
alpha,
tensor_a.device_ref(),
tensor_b.device_ref(),
beta,
tensor_c.device_ref(),
tensor_ref_d.device_ref());
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
std::cout << (passed ? "Passed" : "Failed") << std::endl;
return (passed ? 0 : -1);
}
int main() {
// Turing Tensor Core operations exposed with mma.sync and ldmatrix are first available
// in CUDA 10.2.
//
// CUTLASS must be compiled with CUDA 10.2 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) {
std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl;
// Returning zero so this test passes when built on older Toolkits.
return 0;
}
else {
return run();
}
}
|
f4086e58fada3e120b1330e3daecb33181a56379.hip
|
// !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_pr.cu
*
* @brief Simple test driver program for computing Pagerank.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
#include <cstdlib>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
// BFS includes
#include <gunrock/app/pr/pr_enactor.cuh>
#include <gunrock/app/pr/pr_problem.cuh>
#include <gunrock/app/pr/pr_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
// boost includes
#include <boost/config.hpp>
#include <boost/utility.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/page_rank.hpp>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::pr;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
bool g_verbose;
bool g_undirected;
bool g_quick;
bool g_stream_from_host;
template <typename VertexId, typename Value>
struct RankPair {
VertexId vertex_id;
Value page_rank;
RankPair(VertexId vertex_id, Value page_rank) : vertex_id(vertex_id), page_rank(page_rank) {}
};
template<typename RankPair>
bool PRCompare(
RankPair elem1,
RankPair elem2)
{
return elem1.page_rank > elem2.page_rank;
}
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf("\ntest_pr <graph type> <graph type args> [--device=<device_index>] "
"[--undirected] [--instrumented] [--quick] "
"[--v]\n"
"\n"
"Graph types and args:\n"
" market [<file>]\n"
" Reads a Matrix-Market coordinate-formatted graph of directed/undirected\n"
" edges from stdin (or from the optionally-specified file).\n"
" --device=<device_index> Set GPU device for running the graph primitive.\n"
" --undirected If set then treat the graph as undirected.\n"
" --instrumented If set then kernels keep track of queue-search_depth\n"
" and barrier duty (a relative indicator of load imbalance.)\n"
" --quick If set will skip the CPU validation code.\n"
);
}
/**
* @brief Displays the PageRank result
*
* @param[in] node_id Node vertex Id
* @param[in] rank Rank value for the node
* @param[in] nodes Number of nodes in the graph.
*/
template<typename VertexId, typename Value, typename SizeT>
void DisplaySolution(VertexId *node_id, Value *rank, SizeT nodes)
{
// Print out at most top 10 largest components
int top = (nodes < 10) ? nodes : 10;
printf("Top %d Page Ranks:\n", top);
for (int i = 0; i < top; ++i)
{
printf("Vertex ID: %d, Page Rank: %5f\n", node_id[i], rank[i]);
}
}
/**
* Performance/Evaluation statistics
*/
struct Stats
{
const char *name;
Statistic rate;
Statistic search_depth;
Statistic redundant_work;
Statistic duty;
Stats() : name(NULL), rate(), search_depth(), redundant_work(), duty() {}
Stats(const char *name) :
name(name), rate(), search_depth(), redundant_work(), duty() {}
};
/**
* @brief Displays timing and correctness statistics
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] stats Reference to the Stats object defined in RunTests
* @param[in] h_rank Host-side vector stores computed page rank values for validation
* @param[in] graph Reference to the CSR graph we process on
* @param[in] elapsed Total elapsed kernel running time
* @param[in] total_queued Total element queued in PageRank kernel running process
 * @param[in] avg_duty Average duty of the PageRank kernels
*/
template<
typename VertexId,
typename Value,
typename SizeT>
void DisplayStats(
Stats &stats,
Value *h_rank,
const Csr<VertexId, Value, SizeT> &graph,
double elapsed,
long long total_queued,
double avg_duty)
{
// Display test name
printf("[%s] finished. ", stats.name);
// Display the specific sample statistics
printf(" elapsed: %.3f ms", elapsed);
if (avg_duty != 0)
{
printf("\n avg CTA duty: %.2f%%", avg_duty * 100);
}
printf("\n");
}
/******************************************************************************
* PageRank Testing Routines
*****************************************************************************/
/**
* @brief A simple CPU-based reference Page Rank implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] node_id Source node for personalized PageRank (if any)
* @param[in] rank Host-side vector to store CPU computed labels for each node
* @param[in] delta delta for computing PR
* @param[in] error error threshold
* @param[in] max_iter max iteration to go
*/
template<
typename VertexId,
typename Value,
typename SizeT>
void SimpleReferencePr(
const Csr<VertexId, Value, SizeT> &graph,
VertexId *node_id,
Value *rank,
Value delta,
Value error,
SizeT max_iter,
bool directed)
{
using namespace boost;
//Preparation
typedef adjacency_list<vecS, vecS, bidirectionalS,
no_property, property<edge_index_t, int> > Graph;
Graph g;
for (int i = 0; i < graph.nodes; ++i)
{
for (int j = graph.row_offsets[i]; j < graph.row_offsets[i+1]; ++j)
{
Graph::edge_descriptor e =
add_edge(i, graph.column_indices[j], g).first;
put(edge_index, g, e, i);
}
}
//
//compute page rank
//
CpuTimer cpu_timer;
cpu_timer.Start();
if (!directed)
{
remove_dangling_links(g);
printf("finished remove dangling links.\n");
}
std::vector<Value> ranks(num_vertices(g));
page_rank(g, make_iterator_property_map(ranks.begin(),
get(boost::vertex_index, g)),
boost::graph::n_iterations(max_iter));
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
for (std::size_t i = 0; i < num_vertices(g); ++i)
{
rank[i] = ranks[i];
}
//sort the top page ranks
RankPair<SizeT, Value> *pr_list =
(RankPair<SizeT, Value>*)malloc(
sizeof(RankPair<SizeT, Value>) * num_vertices(g));
for (int i = 0; i < num_vertices(g); ++i)
{
pr_list[i].vertex_id = i;
pr_list[i].page_rank = rank[i];
}
std::stable_sort(
pr_list, pr_list + num_vertices(g), PRCompare<RankPair<SizeT, Value> >);
for (int i = 0; i < num_vertices(g); ++i)
{
node_id[i] = pr_list[i].vertex_id;
rank[i] = pr_list[i].page_rank;
}
free(pr_list);
printf("CPU PageRank finished in %lf msec.\n", elapsed);
}
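/**
 * Added illustrative sketch (editorial addition; an assumption, not the Boost
 * or Gunrock implementation): one power-iteration step of the update that
 * page_rank() above performs, written directly over the CSR arrays used
 * elsewhere in this file. Each round computes
 *   rank_new[v] = (1 - d) / N + d * sum_{u -> v} rank_old[u] / out_degree(u)
 * with damping factor d, expressed here by scattering each vertex's share
 * along its out-edges.
 */
template <
typename VertexId,
typename Value,
typename SizeT>
void PageRankStepSketch(
const Csr<VertexId, Value, SizeT> &graph,
const Value *rank_old,
Value *rank_new,
Value d)
{
for (SizeT v = 0; v < graph.nodes; ++v)
{
rank_new[v] = (Value(1) - d) / graph.nodes;
}
for (SizeT u = 0; u < graph.nodes; ++u)
{
SizeT out_degree = graph.row_offsets[u + 1] - graph.row_offsets[u];
if (out_degree == 0) continue;
Value share = d * rank_old[u] / out_degree;
for (SizeT e = graph.row_offsets[u]; e < graph.row_offsets[u + 1]; ++e)
{
rank_new[graph.column_indices[e]] += share;
}
}
}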
/**
* @brief Run PR tests
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] src Source node for personalized PageRank (if any)
* @param[in] delta Delta value for computing PageRank, usually set to .85
* @param[in] error Error threshold value
* @param[in] max_iter Max iteration for Page Rank computing
* @param[in] max_grid_size Maximum CTA occupancy
* @param[in] num_gpus Number of GPUs
* @param[in] iterations Number of iterations for running the test
* @param[in] context CudaContext for moderngpu to use
*
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT>
void RunTests(
const Csr<VertexId, Value, SizeT> &graph,
VertexId src,
Value delta,
Value error,
SizeT max_iter,
int max_grid_size,
int num_gpus,
int iterations,
CudaContext& context)
{
typedef PRProblem<
VertexId,
SizeT,
Value> Problem;
// Allocate host-side label array (for both reference and gpu-computed results)
Value *reference_rank = (Value*)malloc(sizeof(Value) * graph.nodes);
Value *h_rank = (Value*)malloc(sizeof(Value) * graph.nodes);
VertexId *h_node_id = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
VertexId *reference_node_id = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
Value *reference_check = (g_quick) ? NULL : reference_rank;
// Allocate PageRank enactor map
PREnactor<INSTRUMENT> pr_enactor(g_verbose);
// Allocate problem on GPU
Problem *csr_problem = new Problem;
util::GRError(csr_problem->Init(
g_stream_from_host,
graph,
num_gpus),
"Problem pr Initialization Failed", __FILE__, __LINE__);
Stats *stats = new Stats("GPU PageRank");
long long total_queued = 0;
double avg_duty = 0.0;
// Perform PageRank
GpuTimer gpu_timer;
float elapsed = 0.0f;
for (int iter = 0; iter < iterations; ++iter)
{
util::GRError(
csr_problem->Reset(src, delta, error, pr_enactor.GetFrontierType()),
"pr Problem Data Reset Failed", __FILE__, __LINE__);
gpu_timer.Start();
util::GRError(
pr_enactor.template Enact<Problem>(
context, csr_problem, max_iter, max_grid_size),
"pr Problem Enact Failed", __FILE__, __LINE__);
gpu_timer.Stop();
elapsed += gpu_timer.ElapsedMillis();
}
elapsed /= iterations;
pr_enactor.GetStatistics(total_queued, avg_duty);
// Copy out results
util::GRError(
csr_problem->Extract(h_rank, h_node_id),
"PageRank Problem Data Extraction Failed", __FILE__, __LINE__);
float total_pr = 0;
for (int i = 0; i < graph.nodes; ++i)
{
total_pr += h_rank[i];
}
//
// Compute reference CPU PR solution for source-distance
//
if (reference_check != NULL && total_pr > 0)
{
printf("Computing reference value ...\n");
SimpleReferencePr(
graph,
reference_node_id,
reference_check,
delta,
error,
max_iter,
!g_undirected);
printf("\n");
}
// Verify the result
if (reference_check != NULL && total_pr > 0)
{
printf("Validity: ");
CompareResults(h_rank, reference_check, graph.nodes, true);
}
printf("\nFirst 40 labels of the GPU result.");
// Display Solution
DisplaySolution(h_node_id, h_rank, graph.nodes);
DisplayStats(
*stats,
h_rank,
graph,
elapsed,
total_queued,
avg_duty);
// Cleanup
delete stats;
if (csr_problem) delete csr_problem;
if (reference_check) free(reference_check);
if (h_rank) free(h_rank);
if (h_node_id) free(h_node_id); // also release the node-id arrays allocated above
if (reference_node_id) free(reference_node_id);
hipDeviceSynchronize();
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] args Reference to the command line arguments
* @param[in] context CudaContext pointer for moderngpu APIs
*/
template <
typename VertexId,
typename Value,
typename SizeT>
void RunTests(
Csr<VertexId, Value, SizeT> &graph,
CommandLineArgs &args,
CudaContext& context)
{
Value delta = 0.85f; // Use whatever the specified graph-type's default is
Value error = 0.01f; // Error threshold
SizeT max_iter = 20;
bool instrumented = false; // Whether or not to collect instrumentation from kernels
int max_grid_size = 0; // maximum grid size (0: leave it up to the enactor)
int num_gpus = 1; // Number of GPUs for multi-gpu enactor to use
VertexId src = -1;
int iterations = 1;
instrumented = args.CheckCmdLineFlag("instrumented");
args.GetCmdLineArgument("delta", delta);
args.GetCmdLineArgument("error", error);
args.GetCmdLineArgument("max-iter", max_iter);
args.GetCmdLineArgument("src", src);
args.GetCmdLineArgument("iteration-num", iterations);
g_quick = args.CheckCmdLineFlag("quick");
g_verbose = args.CheckCmdLineFlag("v");
if (instrumented)
{
RunTests<VertexId, Value, SizeT, true>(
graph,
src,
delta,
error,
max_iter,
max_grid_size,
num_gpus,
iterations,
context);
}
else
{
RunTests<VertexId, Value, SizeT, false>(
graph,
src,
delta,
error,
max_iter,
max_grid_size,
num_gpus,
iterations,
context);
}
}
/******************************************************************************
* Main
******************************************************************************/
int main( int argc, char** argv)
{
CommandLineArgs args(argc, argv);
if ((argc < 2) || (args.CheckCmdLineFlag("help")))
{
Usage();
return 1;
}
//DeviceInit(args);
//hipSetDeviceFlags(hipDeviceMapHost);
int dev = 0;
args.GetCmdLineArgument("device", dev);
ContextPtr context = mgpu::CreateCudaDevice(dev);
//srand(0); // Presently deterministic
//srand(time(NULL));
// Parse graph-construction params
g_undirected = args.CheckCmdLineFlag("undirected");
std::string graph_type = argv[1];
int flags = args.ParsedArgc();
int graph_args = argc - flags - 1;
if (graph_args < 1)
{
Usage();
return 1;
}
//
// Construct graph and perform search(es)
//
if (graph_type == "market")
{
// Matrix-market coordinate-formatted graph file
typedef int VertexId; // Use as the node identifier
typedef float Value; // Use as the value type
typedef int SizeT; // Use as the graph size type
Csr<VertexId, Value, SizeT> csr(false); // default for stream_from_host
if (graph_args < 1) { Usage(); return 1; }
char *market_filename = (graph_args == 2) ? argv[2] : NULL;
if (graphio::BuildMarketGraph<false>(
market_filename,
csr,
g_undirected,
false) != 0) // no inverse graph
{
return 1;
}
csr.PrintHistogram();
// Run tests
RunTests(csr, args, *context);
}
else
{
fprintf(stderr, "Unspecified graph type\n");
return 1;
}
return 0;
}
|
f4086e58fada3e120b1330e3daecb33181a56379.cu
|
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_pr.cu
*
* @brief Simple test driver program for computing Pagerank.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
#include <cstdlib>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
// BFS includes
#include <gunrock/app/pr/pr_enactor.cuh>
#include <gunrock/app/pr/pr_problem.cuh>
#include <gunrock/app/pr/pr_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
// boost includes
#include <boost/config.hpp>
#include <boost/utility.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/page_rank.hpp>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::pr;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
bool g_verbose;
bool g_undirected;
bool g_quick;
bool g_stream_from_host;
template <typename VertexId, typename Value>
struct RankPair {
VertexId vertex_id;
Value page_rank;
RankPair(VertexId vertex_id, Value page_rank) : vertex_id(vertex_id), page_rank(page_rank) {}
};
template<typename RankPair>
bool PRCompare(
RankPair elem1,
RankPair elem2)
{
return elem1.page_rank > elem2.page_rank;
}
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf("\ntest_pr <graph type> <graph type args> [--device=<device_index>] "
"[--undirected] [--instrumented] [--quick] "
"[--v]\n"
"\n"
"Graph types and args:\n"
" market [<file>]\n"
" Reads a Matrix-Market coordinate-formatted graph of directed/undirected\n"
" edges from stdin (or from the optionally-specified file).\n"
" --device=<device_index> Set GPU device for running the graph primitive.\n"
" --undirected If set then treat the graph as undirected.\n"
" --instrumented If set then kernels keep track of queue-search_depth\n"
" and barrier duty (a relative indicator of load imbalance.)\n"
" --quick If set will skip the CPU validation code.\n"
);
}
/**
* @brief Displays the PageRank result
*
* @param[in] node_id Node vertex Id
* @param[in] rank Rank value for the node
* @param[in] nodes Number of nodes in the graph.
*/
template<typename VertexId, typename Value, typename SizeT>
void DisplaySolution(VertexId *node_id, Value *rank, SizeT nodes)
{
// Print out at most top 10 largest components
int top = (nodes < 10) ? nodes : 10;
printf("Top %d Page Ranks:\n", top);
for (int i = 0; i < top; ++i)
{
printf("Vertex ID: %d, Page Rank: %5f\n", node_id[i], rank[i]);
}
}
/**
* Performance/Evaluation statistics
*/
struct Stats
{
const char *name;
Statistic rate;
Statistic search_depth;
Statistic redundant_work;
Statistic duty;
Stats() : name(NULL), rate(), search_depth(), redundant_work(), duty() {}
Stats(const char *name) :
name(name), rate(), search_depth(), redundant_work(), duty() {}
};
/**
* @brief Displays timing and correctness statistics
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] stats Reference to the Stats object defined in RunTests
* @param[in] h_rank Host-side vector stores computed page rank values for validation
* @param[in] graph Reference to the CSR graph we process on
* @param[in] elapsed Total elapsed kernel running time
* @param[in] total_queued Total element queued in PageRank kernel running process
 * @param[in] avg_duty Average duty of the PageRank kernels
*/
template<
typename VertexId,
typename Value,
typename SizeT>
void DisplayStats(
Stats &stats,
Value *h_rank,
const Csr<VertexId, Value, SizeT> &graph,
double elapsed,
long long total_queued,
double avg_duty)
{
// Display test name
printf("[%s] finished. ", stats.name);
// Display the specific sample statistics
printf(" elapsed: %.3f ms", elapsed);
if (avg_duty != 0)
{
printf("\n avg CTA duty: %.2f%%", avg_duty * 100);
}
printf("\n");
}
/******************************************************************************
* PageRank Testing Routines
*****************************************************************************/
/**
* @brief A simple CPU-based reference Page Rank implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] node_id Source node for personalized PageRank (if any)
* @param[in] rank Host-side vector to store CPU computed labels for each node
* @param[in] delta delta for computing PR
* @param[in] error error threshold
* @param[in] max_iter max iteration to go
*/
template<
typename VertexId,
typename Value,
typename SizeT>
void SimpleReferencePr(
const Csr<VertexId, Value, SizeT> &graph,
VertexId *node_id,
Value *rank,
Value delta,
Value error,
SizeT max_iter,
bool directed)
{
using namespace boost;
//Preparation
typedef adjacency_list<vecS, vecS, bidirectionalS,
no_property, property<edge_index_t, int> > Graph;
Graph g;
for (int i = 0; i < graph.nodes; ++i)
{
for (int j = graph.row_offsets[i]; j < graph.row_offsets[i+1]; ++j)
{
Graph::edge_descriptor e =
add_edge(i, graph.column_indices[j], g).first;
put(edge_index, g, e, i);
}
}
//
//compute page rank
//
CpuTimer cpu_timer;
cpu_timer.Start();
if (!directed)
{
remove_dangling_links(g);
printf("finished remove dangling links.\n");
}
std::vector<Value> ranks(num_vertices(g));
page_rank(g, make_iterator_property_map(ranks.begin(),
get(boost::vertex_index, g)),
boost::graph::n_iterations(max_iter));
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
for (std::size_t i = 0; i < num_vertices(g); ++i)
{
rank[i] = ranks[i];
}
//sort the top page ranks
RankPair<SizeT, Value> *pr_list =
(RankPair<SizeT, Value>*)malloc(
sizeof(RankPair<SizeT, Value>) * num_vertices(g));
for (int i = 0; i < num_vertices(g); ++i)
{
pr_list[i].vertex_id = i;
pr_list[i].page_rank = rank[i];
}
std::stable_sort(
pr_list, pr_list + num_vertices(g), PRCompare<RankPair<SizeT, Value> >);
for (int i = 0; i < num_vertices(g); ++i)
{
node_id[i] = pr_list[i].vertex_id;
rank[i] = pr_list[i].page_rank;
}
free(pr_list);
printf("CPU PageRank finished in %lf msec.\n", elapsed);
}
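/**
 * Added illustrative sketch (editorial addition; an assumption, not the Boost
 * or Gunrock implementation): one power-iteration step of the update that
 * page_rank() above performs, written directly over the CSR arrays used
 * elsewhere in this file. Each round computes
 *   rank_new[v] = (1 - d) / N + d * sum_{u -> v} rank_old[u] / out_degree(u)
 * with damping factor d, expressed here by scattering each vertex's share
 * along its out-edges.
 */
template <
typename VertexId,
typename Value,
typename SizeT>
void PageRankStepSketch(
const Csr<VertexId, Value, SizeT> &graph,
const Value *rank_old,
Value *rank_new,
Value d)
{
for (SizeT v = 0; v < graph.nodes; ++v)
{
rank_new[v] = (Value(1) - d) / graph.nodes;
}
for (SizeT u = 0; u < graph.nodes; ++u)
{
SizeT out_degree = graph.row_offsets[u + 1] - graph.row_offsets[u];
if (out_degree == 0) continue;
Value share = d * rank_old[u] / out_degree;
for (SizeT e = graph.row_offsets[u]; e < graph.row_offsets[u + 1]; ++e)
{
rank_new[graph.column_indices[e]] += share;
}
}
}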
/**
* @brief Run PR tests
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] src Source node for personalized PageRank (if any)
* @param[in] delta Delta value for computing PageRank, usually set to .85
* @param[in] error Error threshold value
* @param[in] max_iter Max iteration for Page Rank computing
* @param[in] max_grid_size Maximum CTA occupancy
* @param[in] num_gpus Number of GPUs
* @param[in] iterations Number of iterations for running the test
* @param[in] context CudaContext for moderngpu to use
*
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT>
void RunTests(
const Csr<VertexId, Value, SizeT> &graph,
VertexId src,
Value delta,
Value error,
SizeT max_iter,
int max_grid_size,
int num_gpus,
int iterations,
CudaContext& context)
{
typedef PRProblem<
VertexId,
SizeT,
Value> Problem;
// Allocate host-side label array (for both reference and gpu-computed results)
Value *reference_rank = (Value*)malloc(sizeof(Value) * graph.nodes);
Value *h_rank = (Value*)malloc(sizeof(Value) * graph.nodes);
VertexId *h_node_id = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
VertexId *reference_node_id = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
Value *reference_check = (g_quick) ? NULL : reference_rank;
// Allocate PageRank enactor map
PREnactor<INSTRUMENT> pr_enactor(g_verbose);
// Allocate problem on GPU
Problem *csr_problem = new Problem;
util::GRError(csr_problem->Init(
g_stream_from_host,
graph,
num_gpus),
"Problem pr Initialization Failed", __FILE__, __LINE__);
Stats *stats = new Stats("GPU PageRank");
long long total_queued = 0;
double avg_duty = 0.0;
// Perform PageRank
GpuTimer gpu_timer;
float elapsed = 0.0f;
for (int iter = 0; iter < iterations; ++iter)
{
util::GRError(
csr_problem->Reset(src, delta, error, pr_enactor.GetFrontierType()),
"pr Problem Data Reset Failed", __FILE__, __LINE__);
gpu_timer.Start();
util::GRError(
pr_enactor.template Enact<Problem>(
context, csr_problem, max_iter, max_grid_size),
"pr Problem Enact Failed", __FILE__, __LINE__);
gpu_timer.Stop();
elapsed += gpu_timer.ElapsedMillis();
}
elapsed /= iterations;
pr_enactor.GetStatistics(total_queued, avg_duty);
// Copy out results
util::GRError(
csr_problem->Extract(h_rank, h_node_id),
"PageRank Problem Data Extraction Failed", __FILE__, __LINE__);
float total_pr = 0;
for (int i = 0; i < graph.nodes; ++i)
{
total_pr += h_rank[i];
}
//
// Compute reference CPU PR solution for source-distance
//
if (reference_check != NULL && total_pr > 0)
{
printf("Computing reference value ...\n");
SimpleReferencePr(
graph,
reference_node_id,
reference_check,
delta,
error,
max_iter,
!g_undirected);
printf("\n");
}
// Verify the result
if (reference_check != NULL && total_pr > 0)
{
printf("Validity: ");
CompareResults(h_rank, reference_check, graph.nodes, true);
}
printf("\nFirst 40 labels of the GPU result.");
// Display Solution
DisplaySolution(h_node_id, h_rank, graph.nodes);
DisplayStats(
*stats,
h_rank,
graph,
elapsed,
total_queued,
avg_duty);
// Cleanup
delete stats;
if (csr_problem) delete csr_problem;
if (reference_check) free(reference_check);
if (h_rank) free(h_rank);
if (h_node_id) free(h_node_id); // also release the node-id arrays allocated above
if (reference_node_id) free(reference_node_id);
cudaDeviceSynchronize();
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] args Reference to the command line arguments
* @param[in] context CudaContext pointer for moderngpu APIs
*/
template <
typename VertexId,
typename Value,
typename SizeT>
void RunTests(
Csr<VertexId, Value, SizeT> &graph,
CommandLineArgs &args,
CudaContext& context)
{
Value delta = 0.85f; // Use whatever the specified graph-type's default is
Value error = 0.01f; // Error threshold
SizeT max_iter = 20;
bool instrumented = false; // Whether or not to collect instrumentation from kernels
int max_grid_size = 0; // maximum grid size (0: leave it up to the enactor)
int num_gpus = 1; // Number of GPUs for multi-gpu enactor to use
VertexId src = -1;
int iterations = 1;
instrumented = args.CheckCmdLineFlag("instrumented");
args.GetCmdLineArgument("delta", delta);
args.GetCmdLineArgument("error", error);
args.GetCmdLineArgument("max-iter", max_iter);
args.GetCmdLineArgument("src", src);
args.GetCmdLineArgument("iteration-num", iterations);
g_quick = args.CheckCmdLineFlag("quick");
g_verbose = args.CheckCmdLineFlag("v");
if (instrumented)
{
RunTests<VertexId, Value, SizeT, true>(
graph,
src,
delta,
error,
max_iter,
max_grid_size,
num_gpus,
iterations,
context);
}
else
{
RunTests<VertexId, Value, SizeT, false>(
graph,
src,
delta,
error,
max_iter,
max_grid_size,
num_gpus,
iterations,
context);
}
}
/******************************************************************************
* Main
******************************************************************************/
int main( int argc, char** argv)
{
CommandLineArgs args(argc, argv);
if ((argc < 2) || (args.CheckCmdLineFlag("help")))
{
Usage();
return 1;
}
//DeviceInit(args);
//cudaSetDeviceFlags(cudaDeviceMapHost);
int dev = 0;
args.GetCmdLineArgument("device", dev);
ContextPtr context = mgpu::CreateCudaDevice(dev);
//srand(0); // Presently deterministic
//srand(time(NULL));
// Parse graph-construction params
g_undirected = args.CheckCmdLineFlag("undirected");
std::string graph_type = argv[1];
int flags = args.ParsedArgc();
int graph_args = argc - flags - 1;
if (graph_args < 1)
{
Usage();
return 1;
}
//
// Construct graph and perform search(es)
//
if (graph_type == "market")
{
// Matrix-market coordinate-formatted graph file
typedef int VertexId; // Use as the node identifier
typedef float Value; // Use as the value type
typedef int SizeT; // Use as the graph size type
Csr<VertexId, Value, SizeT> csr(false); // default for stream_from_host
if (graph_args < 1) { Usage(); return 1; }
char *market_filename = (graph_args == 2) ? argv[2] : NULL;
if (graphio::BuildMarketGraph<false>(
market_filename,
csr,
g_undirected,
false) != 0) // no inverse graph
{
return 1;
}
csr.PrintHistogram();
// Run tests
RunTests(csr, args, *context);
}
else
{
fprintf(stderr, "Unsupported graph type\n");
return 1;
}
return 0;
}
|
700834cbdc61b5d4fe7252b9d562cdf74700817f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "math_functions.h"
#include "common.h"
#include <cmath>
#include <cstdlib>
#include <cstring>
template <>
void c_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
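// Because hipBLAS/cuBLAS expects column-major data, the call below computes
// C^T = B^T * A^T by swapping the operand order and the M/N dimensions, which
// yields the desired row-major C without any explicit transpose.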
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
(hipblasSgemm(Csingleton::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void c_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
(hipblasDgemm(Csingleton::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void c_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
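// hipBLAS (like cuBLAS) assumes column-major storage, so the row-major A is seen
// as A^T and the requested transpose flag is flipped: CblasNoTrans -> HIPBLAS_OP_T.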
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
(hipblasSgemv(Csingleton::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void c_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
(hipblasDgemv(Csingleton::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void c_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
(hipblasSaxpy(Csingleton::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void c_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
(hipblasDaxpy(Csingleton::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template<>
float c_gpu_dot<float>(const int N, const float* X, const float* Y){
float res;
hipblasSdot(Csingleton::cublas_handle(),N,X,1,Y,1,&res);
return res;
}
template<>
double c_gpu_dot<double>(const int N, const double* X, const double* Y){
double res;
hipblasDdot(Csingleton::cublas_handle(),N,X,1,Y,1,&res);
return res;
}
template<>
int c_gpu_dot<int>(const int N, const int * X, const int* Y){
printf("not implement\n");
return 1;
}
template<>
unsigned int c_gpu_dot<unsigned int>(const int N, const unsigned int* X, const unsigned int* Y){
printf("not implement\n");
return 1;
}
template<typename T>
__global__ void scalar_kernel(const int N, const T alpha, T* X){
CUDA_KERNEL_LOOP(index,N){
X[index] = X[index] * alpha;
}
}
template<>
void c_gpu_scalar<float>(const int N, const float alpha, float* X){
hipLaunchKernelGGL(( scalar_kernel<float>), dim3(C_GET_BLOCKS(N)),dim3(C_CUDA_NUM_THREADS), 0, 0, N, alpha, X);
}
template<>
void c_gpu_scalar<double>(const int N, const double alpha, double* X){
hipLaunchKernelGGL(( scalar_kernel<double>), dim3(C_GET_BLOCKS(N)),dim3(C_CUDA_NUM_THREADS), 0, 0, N, alpha, X);
}
template<>
void c_gpu_scalar<int>(const int N, const int alpha, int* X){
;
}
template<>
void c_gpu_scalar<unsigned int>(const int N, const unsigned int alpha, unsigned int* X){
;
}
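// Element-wise soft-thresholding (shrinkage): Y = sign(X) * max(|X| - lambda, 0),
// i.e. the proximal operator of the L1 norm.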
template<typename T>
__global__ void soft_kernel(const int N, const T lambda, const T* X, T* Y){
CUDA_KERNEL_LOOP(index,N){
if(X[index] > lambda){
Y[index] = X[index] - lambda;
}
else if(X[index] < -lambda){
Y[index] = X[index] + lambda;
}
else{
Y[index] = 0;
}
}
}
template<>
void c_gpu_soft<float>(const int N, const float lambda, const float* X, float* Y){
hipLaunchKernelGGL(( soft_kernel<float>), dim3(C_GET_BLOCKS(N)),dim3(C_CUDA_NUM_THREADS), 0, 0, N,lambda,X,Y);
}
template<>
void c_gpu_soft<double>(const int N, const double lambda, const double* X, double* Y){
hipLaunchKernelGGL(( soft_kernel<double>), dim3(C_GET_BLOCKS(N)),dim3(C_CUDA_NUM_THREADS), 0, 0, N,lambda,X,Y);
}
template<>
void c_gpu_soft<int>(const int N, const int lambda, const int* X, int* Y){
}
template<>
void c_gpu_soft<unsigned int>(const int N, const unsigned int lambda, const unsigned int* X, unsigned int* Y){
}
|
700834cbdc61b5d4fe7252b9d562cdf74700817f.cu
|
#include "math_functions.h"
#include "common.h"
#include <cmath>
#include <cstdlib>
#include <cstring>
template <>
void c_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
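// Because cuBLAS expects column-major data, the call below computes
// C^T = B^T * A^T by swapping the operand order and the M/N dimensions, which
// yields the desired row-major C without any explicit transpose.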
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
(cublasSgemm(Csingleton::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void c_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
(cublasDgemm(Csingleton::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void c_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
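// cuBLAS assumes column-major storage, so the row-major A is seen as A^T and the
// requested transpose flag is flipped: CblasNoTrans -> CUBLAS_OP_T.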
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
(cublasSgemv(Csingleton::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void c_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
(cublasDgemv(Csingleton::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void c_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
(cublasSaxpy(Csingleton::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void c_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
(cublasDaxpy(Csingleton::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template<>
float c_gpu_dot<float>(const int N, const float* X, const float* Y){
float res;
cublasSdot(Csingleton::cublas_handle(),N,X,1,Y,1,&res);
return res;
}
template<>
double c_gpu_dot<double>(const int N, const double* X, const double* Y){
double res;
cublasDdot(Csingleton::cublas_handle(),N,X,1,Y,1,&res);
return res;
}
template<>
int c_gpu_dot<int>(const int N, const int * X, const int* Y){
printf("not implement\n");
return 1;
}
template<>
unsigned int c_gpu_dot<unsigned int>(const int N, const unsigned int* X, const unsigned int* Y){
printf("not implement\n");
return 1;
}
template<typename T>
__global__ void scalar_kernel(const int N, const T alpha, T* X){
CUDA_KERNEL_LOOP(index,N){
X[index] = X[index] * alpha;
}
}
template<>
void c_gpu_scalar<float>(const int N, const float alpha, float* X){
scalar_kernel<float><<<C_GET_BLOCKS(N),C_CUDA_NUM_THREADS>>>(N, alpha, X);
}
template<>
void c_gpu_scalar<double>(const int N, const double alpha, double* X){
scalar_kernel<double><<<C_GET_BLOCKS(N),C_CUDA_NUM_THREADS>>>(N, alpha, X);
}
template<>
void c_gpu_scalar<int>(const int N, const int alpha, int* X){
;
}
template<>
void c_gpu_scalar<unsigned int>(const int N, const unsigned int alpha, unsigned int* X){
;
}
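// Element-wise soft-thresholding (shrinkage): Y = sign(X) * max(|X| - lambda, 0),
// i.e. the proximal operator of the L1 norm.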
template<typename T>
__global__ void soft_kernel(const int N, const T lambda, const T* X, T* Y){
CUDA_KERNEL_LOOP(index,N){
if(X[index] > lambda){
Y[index] = X[index] - lambda;
}
else if(X[index] < -lambda){
Y[index] = X[index] + lambda;
}
else{
Y[index] = 0;
}
}
}
template<>
void c_gpu_soft<float>(const int N, const float lambda, const float* X, float* Y){
soft_kernel<float><<<C_GET_BLOCKS(N),C_CUDA_NUM_THREADS>>>(N,lambda,X,Y);
}
template<>
void c_gpu_soft<double>(const int N, const double lambda, const double* X, double* Y){
soft_kernel<double><<<C_GET_BLOCKS(N),C_CUDA_NUM_THREADS>>>(N,lambda,X,Y);
}
template<>
void c_gpu_soft<int>(const int N, const int lambda, const int* X, int* Y){
}
template<>
void c_gpu_soft<unsigned int>(const int N, const unsigned int lambda, const unsigned int* X, unsigned int* Y){
}
|
7068883ad9640cc1e5da6e68ca3be86dfc9cb469.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* _reg_mutualinformation_kernels.cu
*
*
* Created by Marc Modat on 24/03/2009.
* Copyright (c) 2009, University College London. All rights reserved.
* Centre for Medical Image Computing (CMIC)
* See the LICENSE.txt file in the nifty_reg root folder
*
*/
#ifndef _REG_MUTUALINFORMATION_kernels_CU
#define _REG_MUTUALINFORMATION_kernels_CU
#include <stdio.h>
#define COEFF_L 0.16666666f
#define COEFF_C 0.66666666f
#define COEFF_B 0.83333333f
__device__ __constant__ int c_VoxelNumber;
__device__ __constant__ int3 c_ImageSize;
// Bins: Need 4 values for max 4 channels.
__device__ __constant__ int c_firstTargetBin;
__device__ __constant__ int c_secondTargetBin;
__device__ __constant__ int c_firstResultBin;
__device__ __constant__ int c_secondResultBin;
__device__ __constant__ float4 c_Entropies;
__device__ __constant__ float c_NMI;
__device__ __constant__ int c_ActiveVoxelNumber;
texture<float, 3, hipReadModeElementType> firstTargetImageTexture;
texture<float, 1, hipReadModeElementType> firstResultImageTexture;
texture<float4, 1, hipReadModeElementType> firstResultImageGradientTexture;
texture<float, 1, hipReadModeElementType> histogramTexture;
texture<float4, 1, hipReadModeElementType> gradientImageTexture;
texture<int, 1, hipReadModeElementType> maskTexture;
/// Added for the multichannel stuff. We currently only support 2 target and 2 source channels.
/// So we need another texture for the second target and source channel respectively.
texture<float, 3, hipReadModeElementType> secondTargetImageTexture;
texture<float, 1, hipReadModeElementType> secondResultImageTexture;
texture<float4, 1, hipReadModeElementType> secondResultImageGradientTexture;
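// Cubic B-spline basis used for Parzen-window smoothing of the joint histogram;
// both functions return zero outside the support |x| >= 2.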
__device__ float GetBasisSplineValue(float x)
{
x=fabsf(x);
float value=0.0f;
if(x<2.0f)
if(x<1.0f)
value = 2.0f/3.0f + (0.5f*x-1.0f)*x*x;
else{
x-=2.0f;
value = -x*x*x/6.0f;
}
return value;
}
__device__ float GetBasisSplineDerivativeValue(float ori)
{
float x=fabsf(ori);
float value=0.0f;
if(x<2.0f)
if(x<1.0f)
value = (1.5f*x-2.0f)*ori;
else{
x-=2.0f;
value = -0.5f * x * x;
if(ori<0.0f)value =-value;
}
return value;
}
__global__ void reg_getVoxelBasedNMIGradientUsingPW_kernel(float4 *voxelNMIGradientArray_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_ActiveVoxelNumber){
const int targetIndex = tex1Dfetch(maskTexture,tid);
int tempIndex=targetIndex;
const int z = tempIndex/(c_ImageSize.x*c_ImageSize.y);
tempIndex -= z*c_ImageSize.x*c_ImageSize.y;
const int y = tempIndex/c_ImageSize.x;
const int x = tempIndex - y*c_ImageSize.x;
float targetImageValue = tex3D(firstTargetImageTexture,
((float)x+0.5f)/(float)c_ImageSize.x,
((float)y+0.5f)/(float)c_ImageSize.y,
((float)z+0.5f)/(float)c_ImageSize.z);
float resultImageValue = tex1Dfetch(firstResultImageTexture,targetIndex);
float4 resultImageGradient = tex1Dfetch(firstResultImageGradientTexture,tid);
float4 gradValue = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
// No computation is performed if any of the points is part of the background
// Two is added because the image is resampled between 2 and bin+2:
// if 64 bins are used the histogram will have 68 bins and the image will be between 2 and 65
if( targetImageValue>0.0f &&
resultImageValue>0.0f &&
targetImageValue<c_firstTargetBin &&
resultImageValue<c_firstResultBin &&
targetImageValue==targetImageValue &&
resultImageValue==resultImageValue){
targetImageValue = floor(targetImageValue);
resultImageValue = floor(resultImageValue);
float3 resDeriv = make_float3(
resultImageGradient.x,
resultImageGradient.y,
resultImageGradient.z);
if( resultImageGradient.x==resultImageGradient.x &&
resultImageGradient.y==resultImageGradient.y &&
resultImageGradient.z==resultImageGradient.z){
float jointEntropyDerivative_X = 0.0f;
float movingEntropyDerivative_X = 0.0f;
float fixedEntropyDerivative_X = 0.0f;
float jointEntropyDerivative_Y = 0.0f;
float movingEntropyDerivative_Y = 0.0f;
float fixedEntropyDerivative_Y = 0.0f;
float jointEntropyDerivative_Z = 0.0f;
float movingEntropyDerivative_Z = 0.0f;
float fixedEntropyDerivative_Z = 0.0f;
for(int t=(int)(targetImageValue-1.0f); t<(int)(targetImageValue+2.0f); t++){
if(-1<t && t<c_firstTargetBin){
for(int r=(int)(resultImageValue-1.0f); r<(int)(resultImageValue+2.0f); r++){
if(-1<r && r<c_firstResultBin){
float commonValue = GetBasisSplineValue((float)t-targetImageValue) *
GetBasisSplineDerivativeValue((float)r-resultImageValue);
float jointLog = tex1Dfetch(histogramTexture, r*c_firstResultBin+t);
float targetLog = tex1Dfetch(histogramTexture, c_firstTargetBin*c_firstResultBin+t);
float resultLog = tex1Dfetch(histogramTexture, c_firstTargetBin*c_firstResultBin+c_firstTargetBin+r);
float temp = commonValue * resDeriv.x;
jointEntropyDerivative_X -= temp * jointLog;
fixedEntropyDerivative_X -= temp * targetLog;
movingEntropyDerivative_X -= temp * resultLog;
temp = commonValue * resDeriv.y;
jointEntropyDerivative_Y -= temp * jointLog;
fixedEntropyDerivative_Y -= temp * targetLog;
movingEntropyDerivative_Y -= temp * resultLog;
temp = commonValue * resDeriv.z;
jointEntropyDerivative_Z -= temp * jointLog;
fixedEntropyDerivative_Z -= temp * targetLog;
movingEntropyDerivative_Z -= temp * resultLog;
} // 0<r<bin
} // r
} // 0<t<bin
} // t
float NMI= c_NMI;
float temp = c_Entropies.z;
// (Marc) I removed the normalisation by the voxel number as each gradient has to be normalised in the same way
gradValue.x = (fixedEntropyDerivative_X + movingEntropyDerivative_X - NMI * jointEntropyDerivative_X) / temp;
gradValue.y = (fixedEntropyDerivative_Y + movingEntropyDerivative_Y - NMI * jointEntropyDerivative_Y) / temp;
gradValue.z = (fixedEntropyDerivative_Z + movingEntropyDerivative_Z - NMI * jointEntropyDerivative_Z) / temp;
}
}
voxelNMIGradientArray_d[targetIndex]=gradValue;
}
return;
}
// Multichannel NMI gradient. Hardcoded for 2x2 NMI channels.
__global__ void reg_getVoxelBasedNMIGradientUsingPW2x2_kernel(float4 *voxelNMIGradientArray_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_ActiveVoxelNumber){
const int targetIndex = tex1Dfetch(maskTexture,tid);
int tempIndex=targetIndex;
const int z = tempIndex/(c_ImageSize.x*c_ImageSize.y);
tempIndex -= z*c_ImageSize.x*c_ImageSize.y;
const int y = tempIndex/c_ImageSize.x;
const int x = tempIndex - y*c_ImageSize.x;
float4 voxelValues = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
voxelValues.x = tex3D(firstTargetImageTexture,
((float)x+0.5f)/(float)c_ImageSize.x,
((float)y+0.5f)/(float)c_ImageSize.y,
((float)z+0.5f)/(float)c_ImageSize.z);
voxelValues.y = tex3D(secondTargetImageTexture,
((float)x+0.5f)/(float)c_ImageSize.x,
((float)y+0.5f)/(float)c_ImageSize.y,
((float)z+0.5f)/(float)c_ImageSize.z);
voxelValues.z = tex1Dfetch(firstResultImageTexture,targetIndex);
voxelValues.w = tex1Dfetch(secondResultImageTexture,targetIndex);
float4 firstResultImageGradient = tex1Dfetch(firstResultImageGradientTexture,tid);
float4 secondResultImageGradient = tex1Dfetch(secondResultImageGradientTexture,tid);
float4 gradValue = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
// Could remove some tests (which are not really needed) to reduce register
// count. They should be put in again at some point for completeness and generality.
if (voxelValues.x == voxelValues.x &&
voxelValues.y == voxelValues.y &&
voxelValues.z == voxelValues.z &&
voxelValues.w == voxelValues.w &&
voxelValues.x >= 0.0f &&
voxelValues.y >= 0.0f &&
voxelValues.z >= 0.0f &&
voxelValues.w >= 0.0f &&
voxelValues.x < c_firstTargetBin &&
voxelValues.y < c_secondTargetBin &&
voxelValues.z < c_firstResultBin &&
voxelValues.w < c_secondResultBin)
{
voxelValues.x = (float)((int)voxelValues.x);
voxelValues.y = (float)((int)voxelValues.y);
voxelValues.z = (float)((int)voxelValues.z);
voxelValues.w = (float)((int)voxelValues.w);
if( firstResultImageGradient.x==firstResultImageGradient.x &&
firstResultImageGradient.y==firstResultImageGradient.y &&
firstResultImageGradient.z==firstResultImageGradient.z &&
secondResultImageGradient.x==secondResultImageGradient.x &&
secondResultImageGradient.y==secondResultImageGradient.y &&
secondResultImageGradient.z==secondResultImageGradient.z)
{
float jointEntropyDerivative_X = 0.0f;
float movingEntropyDerivative_X = 0.0f;
float fixedEntropyDerivative_X = 0.0f;
float jointEntropyDerivative_Y = 0.0f;
float movingEntropyDerivative_Y = 0.0f;
float fixedEntropyDerivative_Y = 0.0f;
float jointEntropyDerivative_Z = 0.0f;
float movingEntropyDerivative_Z = 0.0f;
float fixedEntropyDerivative_Z = 0.0f;
float jointLog, targetLog, resultLog, temp;
float4 relative_pos = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
float s_x, s_y, s_z, s_w;
float common_target_value = 0.0f;
int target_flat_index, result_flat_index, total_target_entries, num_probabilities;
for (int i=-1; i<2; ++i) {
relative_pos.x = (int)(voxelValues.x+i);
if (-1<relative_pos.x && relative_pos.x<c_firstTargetBin) {
for (int j=-1; j<2; ++j) {
relative_pos.y = (int)(voxelValues.y+j);
if (-1<relative_pos.y && relative_pos.y<c_secondTargetBin) {
s_x = GetBasisSplineValue(relative_pos.x-voxelValues.x);
s_y = GetBasisSplineValue(relative_pos.y-voxelValues.y);
common_target_value = s_x * s_y;
for (int k=-1; k<2; ++k) {
relative_pos.z = (int)(voxelValues.z+k);
if (-1<relative_pos.z && relative_pos.z<c_firstResultBin) {
s_x = GetBasisSplineDerivativeValue(relative_pos.z-voxelValues.z);
s_w = GetBasisSplineValue(relative_pos.z-voxelValues.z);
for (int l=-1; l<2; ++l) {
relative_pos.w = (int)(voxelValues.w+l);
if (-1<relative_pos.w && relative_pos.w<c_secondResultBin) {
target_flat_index = relative_pos.x + relative_pos.y * c_firstTargetBin;
result_flat_index = relative_pos.z + relative_pos.w * c_firstResultBin;
total_target_entries = c_firstTargetBin * c_secondTargetBin;
num_probabilities = total_target_entries * c_firstResultBin * c_secondResultBin;
jointLog = tex1Dfetch(histogramTexture, target_flat_index + (result_flat_index * total_target_entries));
targetLog = tex1Dfetch(histogramTexture, num_probabilities + target_flat_index);
resultLog = tex1Dfetch(histogramTexture, num_probabilities + total_target_entries + result_flat_index);
// Contribution from floating images. These arithmetic operations use
// a lot of registers. Need to look into whether this can be reduced somehow.
s_y = GetBasisSplineValue(relative_pos.w-voxelValues.w);
s_z = GetBasisSplineDerivativeValue(relative_pos.w-voxelValues.w);
temp = (s_x * firstResultImageGradient.x * s_y) +
(s_z * secondResultImageGradient.x * s_w);
temp *= common_target_value;
jointEntropyDerivative_X -= temp * jointLog;
fixedEntropyDerivative_X -= temp * targetLog;
movingEntropyDerivative_X -= temp * resultLog;
temp = (s_x * firstResultImageGradient.y * s_y) +
(s_z * secondResultImageGradient.y * s_w);
temp *= common_target_value;
jointEntropyDerivative_Y -= temp * jointLog;
fixedEntropyDerivative_Y -= temp * targetLog;
movingEntropyDerivative_Y -= temp * resultLog;
temp = (s_x * firstResultImageGradient.z * s_y) +
(s_z * secondResultImageGradient.z * s_w);
temp *= common_target_value;
jointEntropyDerivative_Z -= temp * jointLog;
fixedEntropyDerivative_Z -= temp * targetLog;
movingEntropyDerivative_Z -= temp * resultLog;
}
}
}
}
}
}
}
}
gradValue.x = (fixedEntropyDerivative_X + movingEntropyDerivative_X - c_NMI * jointEntropyDerivative_X) / c_Entropies.z;
gradValue.y = (fixedEntropyDerivative_Y + movingEntropyDerivative_Y - c_NMI * jointEntropyDerivative_Y) / c_Entropies.z;
gradValue.z = (fixedEntropyDerivative_Z + movingEntropyDerivative_Z - c_NMI * jointEntropyDerivative_Z) / c_Entropies.z;
}
}
voxelNMIGradientArray_d[targetIndex]=gradValue;
}
}
__global__ void reg_smoothJointHistogramX_kernel(float *tempHistogram)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_secondTargetBin*c_firstResultBin*c_secondResultBin){
// The starting index is computed
unsigned int startingPoint=tid*c_firstTargetBin;
unsigned int finishPoint=startingPoint+c_firstTargetBin;
// The first point is computed
tempHistogram[startingPoint] = (tex1Dfetch(histogramTexture, startingPoint) * COEFF_C +
tex1Dfetch(histogramTexture, startingPoint+1) * COEFF_L) / COEFF_B;
// The middle points are computed
for(unsigned int i=startingPoint+1; i<finishPoint-1; ++i){
tempHistogram[i] = tex1Dfetch(histogramTexture, i-1) * COEFF_L +
tex1Dfetch(histogramTexture, i) * COEFF_C +
tex1Dfetch(histogramTexture, i+1) * COEFF_L;
}
// The last point is computed
tempHistogram[finishPoint-1] = (tex1Dfetch(histogramTexture, finishPoint-2) * COEFF_L +
tex1Dfetch(histogramTexture, finishPoint-1) * COEFF_C) / COEFF_B;
}
return;
}
__global__ void reg_smoothJointHistogramY_kernel(float *tempHistogram)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_firstTargetBin*c_firstResultBin*c_secondResultBin){
// The starting index is computed
unsigned int startingPoint=tid + c_firstTargetBin*(c_secondTargetBin-1)*(c_firstResultBin*(int)(tid/(c_firstTargetBin*c_firstResultBin)) +
(int)(tid/c_firstTargetBin - c_firstResultBin * (int)(tid/(c_firstTargetBin*c_firstResultBin))));
unsigned int increment = c_firstTargetBin;
unsigned int finishPoint=startingPoint+increment*c_secondTargetBin;
// The first point is computed
tempHistogram[startingPoint] = (tex1Dfetch(histogramTexture, startingPoint) * COEFF_C +
tex1Dfetch(histogramTexture, startingPoint+increment) * COEFF_L) / COEFF_B;
// The middle points are computed
for(unsigned int i=startingPoint+increment; i<finishPoint-increment; i+=increment){
tempHistogram[i] = tex1Dfetch(histogramTexture, i-increment) * COEFF_L +
tex1Dfetch(histogramTexture, i) * COEFF_C +
tex1Dfetch(histogramTexture, i+increment) * COEFF_L;
}
// The last point is computed
tempHistogram[finishPoint-increment] = (tex1Dfetch(histogramTexture, finishPoint-2*increment) * COEFF_L +
tex1Dfetch(histogramTexture, finishPoint-increment) * COEFF_C) / COEFF_B;
}
return;
}
__global__ void reg_smoothJointHistogramZ_kernel(float *tempHistogram)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_firstTargetBin*c_secondTargetBin*c_secondResultBin){
// The starting index is computed
unsigned int startingPoint=tid+c_firstTargetBin*c_secondTargetBin*(c_firstResultBin-1)*(int)(tid/(c_firstTargetBin*c_secondTargetBin));
unsigned int increment = c_firstTargetBin*c_secondTargetBin;
unsigned int finishPoint=startingPoint+increment*c_firstResultBin;
// The first point is computed
tempHistogram[startingPoint] = (tex1Dfetch(histogramTexture, startingPoint) * COEFF_C +
tex1Dfetch(histogramTexture, startingPoint+increment) * COEFF_L) / COEFF_B;
// The middle points are computed
for(unsigned int i=startingPoint+increment; i<finishPoint-increment; i+=increment){
tempHistogram[i] = tex1Dfetch(histogramTexture, i-increment) * COEFF_L +
tex1Dfetch(histogramTexture, i) * COEFF_C +
tex1Dfetch(histogramTexture, i+increment) * COEFF_L;
}
// The last point is computed
tempHistogram[finishPoint-increment] = (tex1Dfetch(histogramTexture, finishPoint-2*increment) * COEFF_L +
tex1Dfetch(histogramTexture, finishPoint-increment) * COEFF_C) / COEFF_B;
}
return;
}
__global__ void reg_smoothJointHistogramW_kernel(float *tempHistogram)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_firstTargetBin*c_secondTargetBin*c_firstResultBin){
// The starting index is computed
unsigned int startingPoint=tid;
unsigned int increment = c_firstTargetBin*c_secondTargetBin*c_firstResultBin;
unsigned int finishPoint=startingPoint+increment*c_secondResultBin;
// The first point is computed
tempHistogram[startingPoint] = (tex1Dfetch(histogramTexture, startingPoint) * COEFF_C +
tex1Dfetch(histogramTexture, startingPoint+increment) * COEFF_L) / COEFF_B;
// The middle points are computed
for(unsigned int i=startingPoint+increment; i<finishPoint-increment; i+=increment){
tempHistogram[i] = tex1Dfetch(histogramTexture, i-increment) * COEFF_L +
tex1Dfetch(histogramTexture, i) * COEFF_C +
tex1Dfetch(histogramTexture, i+increment) * COEFF_L;
}
// The last point is computed
tempHistogram[finishPoint-increment] = (tex1Dfetch(histogramTexture, finishPoint-2*increment) * COEFF_L +
tex1Dfetch(histogramTexture, finishPoint-increment) * COEFF_C) / COEFF_B;
}
return;
}
/// Kernels for marginalisation along the different axes
__global__ void reg_marginaliseTargetX_kernel(float *babyHisto)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_secondTargetBin*c_firstResultBin*c_secondResultBin){
unsigned int startingPoint=tid*c_firstTargetBin;
unsigned int finishPoint=startingPoint+c_firstTargetBin;
float sum=tex1Dfetch(histogramTexture, startingPoint);
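// Kahan compensated summation: c accumulates the lost low-order bits so the
// marginalisation over many bins stays numerically accurate.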
float c=0.f,Y,t;
for(unsigned int i=startingPoint+1; i<finishPoint; ++i){
Y = tex1Dfetch(histogramTexture, i) - c;
t = sum + Y;
c = (t-sum)-Y;
sum=t;
}
babyHisto[tid]=sum;
}
}
__global__ void reg_marginaliseTargetXY_kernel(float *babyHisto)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_firstResultBin*c_secondResultBin){
unsigned int startingPoint=tid*c_secondTargetBin;
unsigned int finishPoint=startingPoint+c_secondTargetBin;
float sum=tex1Dfetch(histogramTexture, startingPoint);
float c=0.f,Y,t;
for(unsigned int i=startingPoint+1; i<finishPoint; ++i){
Y = tex1Dfetch(histogramTexture, i) - c;
t = sum + Y;
c = (t-sum)-Y;
sum=t;
}
babyHisto[tid]=sum;
}
}
__global__ void reg_marginaliseResultX_kernel(float *babyHisto)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_firstTargetBin*c_secondTargetBin*c_firstResultBin){
unsigned int startingPoint = tid;
float sum=tex1Dfetch(histogramTexture, startingPoint);
// increment by one firstTargetBin*secondTargetBin*firstResultBin cube per step
unsigned int increment = c_firstTargetBin*c_secondTargetBin*c_firstResultBin;
float c=0.f,Y,t;
for (unsigned int i = 1; i < c_secondResultBin; ++i)
{
Y = tex1Dfetch(histogramTexture, startingPoint + i *increment) - c;
t = sum + Y;
c = (t-sum)-Y;
sum=t;
}
babyHisto[tid]=sum;
}
}
__global__ void reg_marginaliseResultXY_kernel(float *babyHisto)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_firstTargetBin*c_secondTargetBin){
unsigned int startingPoint=tid;
float sum=tex1Dfetch(histogramTexture, startingPoint);
// increment by the plane.
unsigned int increment = c_firstTargetBin*c_secondTargetBin;
float c=0.f,Y,t;
for (unsigned int i = 1; i < c_firstResultBin; ++i)
{
Y = tex1Dfetch(histogramTexture, startingPoint + i *increment) - c;
t = sum + Y;
c = (t-sum)-Y;
sum=t;
}
babyHisto[tid]=sum;
}
}
#endif
|
7068883ad9640cc1e5da6e68ca3be86dfc9cb469.cu
|
/*
* _reg_mutualinformation_kernels.cu
*
*
* Created by Marc Modat on 24/03/2009.
* Copyright (c) 2009, University College London. All rights reserved.
* Centre for Medical Image Computing (CMIC)
* See the LICENSE.txt file in the nifty_reg root folder
*
*/
#ifndef _REG_MUTUALINFORMATION_kernels_CU
#define _REG_MUTUALINFORMATION_kernels_CU
#include <stdio.h>
#define COEFF_L 0.16666666f
#define COEFF_C 0.66666666f
#define COEFF_B 0.83333333f
__device__ __constant__ int c_VoxelNumber;
__device__ __constant__ int3 c_ImageSize;
// Bins: Need 4 values for max 4 channels.
__device__ __constant__ int c_firstTargetBin;
__device__ __constant__ int c_secondTargetBin;
__device__ __constant__ int c_firstResultBin;
__device__ __constant__ int c_secondResultBin;
__device__ __constant__ float4 c_Entropies;
__device__ __constant__ float c_NMI;
__device__ __constant__ int c_ActiveVoxelNumber;
texture<float, 3, cudaReadModeElementType> firstTargetImageTexture;
texture<float, 1, cudaReadModeElementType> firstResultImageTexture;
texture<float4, 1, cudaReadModeElementType> firstResultImageGradientTexture;
texture<float, 1, cudaReadModeElementType> histogramTexture;
texture<float4, 1, cudaReadModeElementType> gradientImageTexture;
texture<int, 1, cudaReadModeElementType> maskTexture;
/// Added for the multichannel stuff. We currently only support 2 target and 2 source channels.
/// So we need another texture for the second target and source channel respectively.
texture<float, 3, cudaReadModeElementType> secondTargetImageTexture;
texture<float, 1, cudaReadModeElementType> secondResultImageTexture;
texture<float4, 1, cudaReadModeElementType> secondResultImageGradientTexture;
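// Cubic B-spline basis used for Parzen-window smoothing of the joint histogram;
// both functions return zero outside the support |x| >= 2.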
__device__ float GetBasisSplineValue(float x)
{
x=fabsf(x);
float value=0.0f;
if(x<2.0f)
if(x<1.0f)
value = 2.0f/3.0f + (0.5f*x-1.0f)*x*x;
else{
x-=2.0f;
value = -x*x*x/6.0f;
}
return value;
}
__device__ float GetBasisSplineDerivativeValue(float ori)
{
float x=fabsf(ori);
float value=0.0f;
if(x<2.0f)
if(x<1.0f)
value = (1.5f*x-2.0f)*ori;
else{
x-=2.0f;
value = -0.5f * x * x;
if(ori<0.0f)value =-value;
}
return value;
}
__global__ void reg_getVoxelBasedNMIGradientUsingPW_kernel(float4 *voxelNMIGradientArray_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_ActiveVoxelNumber){
const int targetIndex = tex1Dfetch(maskTexture,tid);
int tempIndex=targetIndex;
const int z = tempIndex/(c_ImageSize.x*c_ImageSize.y);
tempIndex -= z*c_ImageSize.x*c_ImageSize.y;
const int y = tempIndex/c_ImageSize.x;
const int x = tempIndex - y*c_ImageSize.x;
float targetImageValue = tex3D(firstTargetImageTexture,
((float)x+0.5f)/(float)c_ImageSize.x,
((float)y+0.5f)/(float)c_ImageSize.y,
((float)z+0.5f)/(float)c_ImageSize.z);
float resultImageValue = tex1Dfetch(firstResultImageTexture,targetIndex);
float4 resultImageGradient = tex1Dfetch(firstResultImageGradientTexture,tid);
float4 gradValue = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
// No computation is performed if any of the points is part of the background
// Two is added because the image is resampled between 2 and bin+2:
// if 64 bins are used the histogram will have 68 bins and the image will be between 2 and 65
if( targetImageValue>0.0f &&
resultImageValue>0.0f &&
targetImageValue<c_firstTargetBin &&
resultImageValue<c_firstResultBin &&
targetImageValue==targetImageValue &&
resultImageValue==resultImageValue){
targetImageValue = floor(targetImageValue);
resultImageValue = floor(resultImageValue);
float3 resDeriv = make_float3(
resultImageGradient.x,
resultImageGradient.y,
resultImageGradient.z);
if( resultImageGradient.x==resultImageGradient.x &&
resultImageGradient.y==resultImageGradient.y &&
resultImageGradient.z==resultImageGradient.z){
float jointEntropyDerivative_X = 0.0f;
float movingEntropyDerivative_X = 0.0f;
float fixedEntropyDerivative_X = 0.0f;
float jointEntropyDerivative_Y = 0.0f;
float movingEntropyDerivative_Y = 0.0f;
float fixedEntropyDerivative_Y = 0.0f;
float jointEntropyDerivative_Z = 0.0f;
float movingEntropyDerivative_Z = 0.0f;
float fixedEntropyDerivative_Z = 0.0f;
for(int t=(int)(targetImageValue-1.0f); t<(int)(targetImageValue+2.0f); t++){
if(-1<t && t<c_firstTargetBin){
for(int r=(int)(resultImageValue-1.0f); r<(int)(resultImageValue+2.0f); r++){
if(-1<r && r<c_firstResultBin){
float commonValue = GetBasisSplineValue((float)t-targetImageValue) *
GetBasisSplineDerivativeValue((float)r-resultImageValue);
float jointLog = tex1Dfetch(histogramTexture, r*c_firstResultBin+t);
float targetLog = tex1Dfetch(histogramTexture, c_firstTargetBin*c_firstResultBin+t);
float resultLog = tex1Dfetch(histogramTexture, c_firstTargetBin*c_firstResultBin+c_firstTargetBin+r);
float temp = commonValue * resDeriv.x;
jointEntropyDerivative_X -= temp * jointLog;
fixedEntropyDerivative_X -= temp * targetLog;
movingEntropyDerivative_X -= temp * resultLog;
temp = commonValue * resDeriv.y;
jointEntropyDerivative_Y -= temp * jointLog;
fixedEntropyDerivative_Y -= temp * targetLog;
movingEntropyDerivative_Y -= temp * resultLog;
temp = commonValue * resDeriv.z;
jointEntropyDerivative_Z -= temp * jointLog;
fixedEntropyDerivative_Z -= temp * targetLog;
movingEntropyDerivative_Z -= temp * resultLog;
} // 0<r<bin
} // r
} // 0<t<bin
} // t
float NMI= c_NMI;
float temp = c_Entropies.z;
// (Marc) I removed the normalisation by the voxel number as each gradient has to be normalised in the same way
gradValue.x = (fixedEntropyDerivative_X + movingEntropyDerivative_X - NMI * jointEntropyDerivative_X) / temp;
gradValue.y = (fixedEntropyDerivative_Y + movingEntropyDerivative_Y - NMI * jointEntropyDerivative_Y) / temp;
gradValue.z = (fixedEntropyDerivative_Z + movingEntropyDerivative_Z - NMI * jointEntropyDerivative_Z) / temp;
}
}
voxelNMIGradientArray_d[targetIndex]=gradValue;
}
return;
}
// Multichannel NMI gradient. Hardcoded for 2x2 NMI channels.
__global__ void reg_getVoxelBasedNMIGradientUsingPW2x2_kernel(float4 *voxelNMIGradientArray_d)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_ActiveVoxelNumber){
const int targetIndex = tex1Dfetch(maskTexture,tid);
int tempIndex=targetIndex;
const int z = tempIndex/(c_ImageSize.x*c_ImageSize.y);
tempIndex -= z*c_ImageSize.x*c_ImageSize.y;
const int y = tempIndex/c_ImageSize.x;
const int x = tempIndex - y*c_ImageSize.x;
float4 voxelValues = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
voxelValues.x = tex3D(firstTargetImageTexture,
((float)x+0.5f)/(float)c_ImageSize.x,
((float)y+0.5f)/(float)c_ImageSize.y,
((float)z+0.5f)/(float)c_ImageSize.z);
voxelValues.y = tex3D(secondTargetImageTexture,
((float)x+0.5f)/(float)c_ImageSize.x,
((float)y+0.5f)/(float)c_ImageSize.y,
((float)z+0.5f)/(float)c_ImageSize.z);
voxelValues.z = tex1Dfetch(firstResultImageTexture,targetIndex);
voxelValues.w = tex1Dfetch(secondResultImageTexture,targetIndex);
float4 firstResultImageGradient = tex1Dfetch(firstResultImageGradientTexture,tid);
float4 secondResultImageGradient = tex1Dfetch(secondResultImageGradientTexture,tid);
float4 gradValue = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
// Could remove some tests (which are not really needed) to reduce register
// count. They should be put in again at some point for completeness and generality.
if (voxelValues.x == voxelValues.x &&
voxelValues.y == voxelValues.y &&
voxelValues.z == voxelValues.z &&
voxelValues.w == voxelValues.w &&
voxelValues.x >= 0.0f &&
voxelValues.y >= 0.0f &&
voxelValues.z >= 0.0f &&
voxelValues.w >= 0.0f &&
voxelValues.x < c_firstTargetBin &&
voxelValues.y < c_secondTargetBin &&
voxelValues.z < c_firstResultBin &&
voxelValues.w < c_secondResultBin)
{
voxelValues.x = (float)((int)voxelValues.x);
voxelValues.y = (float)((int)voxelValues.y);
voxelValues.z = (float)((int)voxelValues.z);
voxelValues.w = (float)((int)voxelValues.w);
if( firstResultImageGradient.x==firstResultImageGradient.x &&
firstResultImageGradient.y==firstResultImageGradient.y &&
firstResultImageGradient.z==firstResultImageGradient.z &&
secondResultImageGradient.x==secondResultImageGradient.x &&
secondResultImageGradient.y==secondResultImageGradient.y &&
secondResultImageGradient.z==secondResultImageGradient.z)
{
float jointEntropyDerivative_X = 0.0f;
float movingEntropyDerivative_X = 0.0f;
float fixedEntropyDerivative_X = 0.0f;
float jointEntropyDerivative_Y = 0.0f;
float movingEntropyDerivative_Y = 0.0f;
float fixedEntropyDerivative_Y = 0.0f;
float jointEntropyDerivative_Z = 0.0f;
float movingEntropyDerivative_Z = 0.0f;
float fixedEntropyDerivative_Z = 0.0f;
float jointLog, targetLog, resultLog, temp;
float4 relative_pos = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
float s_x, s_y, s_z, s_w;
float common_target_value = 0.0f;
int target_flat_index, result_flat_index, total_target_entries, num_probabilities;
for (int i=-1; i<2; ++i) {
relative_pos.x = (int)(voxelValues.x+i);
if (-1<relative_pos.x && relative_pos.x<c_firstTargetBin) {
for (int j=-1; j<2; ++j) {
relative_pos.y = (int)(voxelValues.y+j);
if (-1<relative_pos.y && relative_pos.y<c_secondTargetBin) {
s_x = GetBasisSplineValue(relative_pos.x-voxelValues.x);
s_y = GetBasisSplineValue(relative_pos.y-voxelValues.y);
common_target_value = s_x * s_y;
for (int k=-1; k<2; ++k) {
relative_pos.z = (int)(voxelValues.z+k);
if (-1<relative_pos.z && relative_pos.z<c_firstResultBin) {
s_x = GetBasisSplineDerivativeValue(relative_pos.z-voxelValues.z);
s_w = GetBasisSplineValue(relative_pos.z-voxelValues.z);
for (int l=-1; l<2; ++l) {
relative_pos.w = (int)(voxelValues.w+l);
if (-1<relative_pos.w && relative_pos.w<c_secondResultBin) {
target_flat_index = relative_pos.x + relative_pos.y * c_firstTargetBin;
result_flat_index = relative_pos.z + relative_pos.w * c_firstResultBin;
total_target_entries = c_firstTargetBin * c_secondTargetBin;
num_probabilities = total_target_entries * c_firstResultBin * c_secondResultBin;
jointLog = tex1Dfetch(histogramTexture, target_flat_index + (result_flat_index * total_target_entries));
targetLog = tex1Dfetch(histogramTexture, num_probabilities + target_flat_index);
resultLog = tex1Dfetch(histogramTexture, num_probabilities + total_target_entries + result_flat_index);
// Contribution from floating images. These arithmetic operations use
// a lot of registers. Need to look into whether this can be reduced somehow.
s_y = GetBasisSplineValue(relative_pos.w-voxelValues.w);
s_z = GetBasisSplineDerivativeValue(relative_pos.w-voxelValues.w);
temp = (s_x * firstResultImageGradient.x * s_y) +
(s_z * secondResultImageGradient.x * s_w);
temp *= common_target_value;
jointEntropyDerivative_X -= temp * jointLog;
fixedEntropyDerivative_X -= temp * targetLog;
movingEntropyDerivative_X -= temp * resultLog;
temp = (s_x * firstResultImageGradient.y * s_y) +
(s_z * secondResultImageGradient.y * s_w);
temp *= common_target_value;
jointEntropyDerivative_Y -= temp * jointLog;
fixedEntropyDerivative_Y -= temp * targetLog;
movingEntropyDerivative_Y -= temp * resultLog;
temp = (s_x * firstResultImageGradient.z * s_y) +
(s_z * secondResultImageGradient.z * s_w);
temp *= common_target_value;
jointEntropyDerivative_Z -= temp * jointLog;
fixedEntropyDerivative_Z -= temp * targetLog;
movingEntropyDerivative_Z -= temp * resultLog;
}
}
}
}
}
}
}
}
gradValue.x = (fixedEntropyDerivative_X + movingEntropyDerivative_X - c_NMI * jointEntropyDerivative_X) / c_Entropies.z;
gradValue.y = (fixedEntropyDerivative_Y + movingEntropyDerivative_Y - c_NMI * jointEntropyDerivative_Y) / c_Entropies.z;
gradValue.z = (fixedEntropyDerivative_Z + movingEntropyDerivative_Z - c_NMI * jointEntropyDerivative_Z) / c_Entropies.z;
}
}
voxelNMIGradientArray_d[targetIndex]=gradValue;
}
}
__global__ void reg_smoothJointHistogramX_kernel(float *tempHistogram)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_secondTargetBin*c_firstResultBin*c_secondResultBin){
// The starting index is computed
unsigned int startingPoint=tid*c_firstTargetBin;
unsigned int finishPoint=startingPoint+c_firstTargetBin;
// The first point is computed
tempHistogram[startingPoint] = (tex1Dfetch(histogramTexture, startingPoint) * COEFF_C +
tex1Dfetch(histogramTexture, startingPoint+1) * COEFF_L) / COEFF_B;
// The middle points are computed
for(unsigned int i=startingPoint+1; i<finishPoint-1; ++i){
tempHistogram[i] = tex1Dfetch(histogramTexture, i-1) * COEFF_L +
tex1Dfetch(histogramTexture, i) * COEFF_C +
tex1Dfetch(histogramTexture, i+1) * COEFF_L;
}
// The last point is computed
tempHistogram[finishPoint-1] = (tex1Dfetch(histogramTexture, finishPoint-2) * COEFF_L +
tex1Dfetch(histogramTexture, finishPoint-1) * COEFF_C) / COEFF_B;
}
return;
}
__global__ void reg_smoothJointHistogramY_kernel(float *tempHistogram)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_firstTargetBin*c_firstResultBin*c_secondResultBin){
// The starting index is computed
unsigned int startingPoint=tid + c_firstTargetBin*(c_secondTargetBin-1)*(c_firstResultBin*(int)(tid/(c_firstTargetBin*c_firstResultBin)) +
(int)(tid/c_firstTargetBin - c_firstResultBin * (int)(tid/(c_firstTargetBin*c_firstResultBin))));
unsigned int increment = c_firstTargetBin;
unsigned int finishPoint=startingPoint+increment*c_secondTargetBin;
// The first point is computed
tempHistogram[startingPoint] = (tex1Dfetch(histogramTexture, startingPoint) * COEFF_C +
tex1Dfetch(histogramTexture, startingPoint+increment) * COEFF_L) / COEFF_B;
// The middle points are computed
for(unsigned int i=startingPoint+increment; i<finishPoint-increment; i+=increment){
tempHistogram[i] = tex1Dfetch(histogramTexture, i-increment) * COEFF_L +
tex1Dfetch(histogramTexture, i) * COEFF_C +
tex1Dfetch(histogramTexture, i+increment) * COEFF_L;
}
// The last point is computed
tempHistogram[finishPoint-increment] = (tex1Dfetch(histogramTexture, finishPoint-2*increment) * COEFF_L +
tex1Dfetch(histogramTexture, finishPoint-increment) * COEFF_C) / COEFF_B;
}
return;
}
__global__ void reg_smoothJointHistogramZ_kernel(float *tempHistogram)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_firstTargetBin*c_secondTargetBin*c_secondResultBin){
// The starting index is computed
unsigned int startingPoint=tid+c_firstTargetBin*c_secondTargetBin*(c_firstResultBin-1)*(int)(tid/(c_firstTargetBin*c_secondTargetBin));
unsigned int increment = c_firstTargetBin*c_secondTargetBin;
unsigned int finishPoint=startingPoint+increment*c_firstResultBin;
// The first point is computed
tempHistogram[startingPoint] = (tex1Dfetch(histogramTexture, startingPoint) * COEFF_C +
tex1Dfetch(histogramTexture, startingPoint+increment) * COEFF_L) / COEFF_B;
// The middle points are computed
for(unsigned int i=startingPoint+increment; i<finishPoint-increment; i+=increment){
tempHistogram[i] = tex1Dfetch(histogramTexture, i-increment) * COEFF_L +
tex1Dfetch(histogramTexture, i) * COEFF_C +
tex1Dfetch(histogramTexture, i+increment) * COEFF_L;
}
// The last point is computed
tempHistogram[finishPoint-increment] = (tex1Dfetch(histogramTexture, finishPoint-2*increment) * COEFF_L +
tex1Dfetch(histogramTexture, finishPoint-increment) * COEFF_C) / COEFF_B;
}
return;
}
__global__ void reg_smoothJointHistogramW_kernel(float *tempHistogram)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_firstTargetBin*c_secondTargetBin*c_firstResultBin){
// The starting index is computed
unsigned int startingPoint=tid;
unsigned int increment = c_firstTargetBin*c_secondTargetBin*c_firstResultBin;
unsigned int finishPoint=startingPoint+increment*c_secondResultBin;
// The first point is computed
tempHistogram[startingPoint] = (tex1Dfetch(histogramTexture, startingPoint) * COEFF_C +
tex1Dfetch(histogramTexture, startingPoint+increment) * COEFF_L) / COEFF_B;
// The middle points are computed
for(unsigned int i=startingPoint+increment; i<finishPoint-increment; i+=increment){
tempHistogram[i] = tex1Dfetch(histogramTexture, i-increment) * COEFF_L +
tex1Dfetch(histogramTexture, i) * COEFF_C +
tex1Dfetch(histogramTexture, i+increment) * COEFF_L;
}
// The last point is computed
tempHistogram[finishPoint-increment] = (tex1Dfetch(histogramTexture, finishPoint-2*increment) * COEFF_L +
tex1Dfetch(histogramTexture, finishPoint-increment) * COEFF_C) / COEFF_B;
}
return;
}
/// Kernels for marginalisation along the different axes
__global__ void reg_marginaliseTargetX_kernel(float *babyHisto)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_secondTargetBin*c_firstResultBin*c_secondResultBin){
unsigned int startingPoint=tid*c_firstTargetBin;
unsigned int finishPoint=startingPoint+c_firstTargetBin;
float sum=tex1Dfetch(histogramTexture, startingPoint);
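// Kahan compensated summation: c accumulates the lost low-order bits so the
// marginalisation over many bins stays numerically accurate.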
float c=0.f,Y,t;
for(unsigned int i=startingPoint+1; i<finishPoint; ++i){
Y = tex1Dfetch(histogramTexture, i) - c;
t = sum + Y;
c = (t-sum)-Y;
sum=t;
}
babyHisto[tid]=sum;
}
}
__global__ void reg_marginaliseTargetXY_kernel(float *babyHisto)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_firstResultBin*c_secondResultBin){
unsigned int startingPoint=tid*c_secondTargetBin;
unsigned int finishPoint=startingPoint+c_secondTargetBin;
float sum=tex1Dfetch(histogramTexture, startingPoint);
float c=0.f,Y,t;
for(unsigned int i=startingPoint+1; i<finishPoint; ++i){
Y = tex1Dfetch(histogramTexture, i) - c;
t = sum + Y;
c = (t-sum)-Y;
sum=t;
}
babyHisto[tid]=sum;
}
}
__global__ void reg_marginaliseResultX_kernel(float *babyHisto)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_firstTargetBin*c_secondTargetBin*c_firstResultBin){
unsigned int startingPoint = tid;
float sum=tex1Dfetch(histogramTexture, startingPoint);
// increment by one firstTargetBin*secondTargetBin*firstResultBin cube per step
unsigned int increment = c_firstTargetBin*c_secondTargetBin*c_firstResultBin;
float c=0.f,Y,t;
for (unsigned int i = 1; i < c_secondResultBin; ++i)
{
Y = tex1Dfetch(histogramTexture, startingPoint + i *increment) - c;
t = sum + Y;
c = (t-sum)-Y;
sum=t;
}
babyHisto[tid]=sum;
}
}
__global__ void reg_marginaliseResultXY_kernel(float *babyHisto)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_firstTargetBin*c_secondTargetBin){
unsigned int startingPoint=tid;
float sum=tex1Dfetch(histogramTexture, startingPoint);
// increment by the plane.
unsigned int increment = c_firstTargetBin*c_secondTargetBin;
float c=0.f,Y,t;
for (unsigned int i = 1; i < c_firstResultBin; ++i)
{
Y = tex1Dfetch(histogramTexture, startingPoint + i *increment) - c;
t = sum + Y;
c = (t-sum)-Y;
sum=t;
}
babyHisto[tid]=sum;
}
}
#endif
|
c1183d55fbf5b5d87389dd6a0c951106b35620cf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* Implements vector addition
*/
#ifdef DEBUG
#endif
__global__ void kern_vec_add_(float* x, float* y, float* r, size_t dim)
{
size_t _strd = blockDim.x * gridDim.x;
for(size_t _i = blockIdx.x * blockDim.x + threadIdx.x; _i < dim; _i += _strd)
r[_i] = x[_i] + y[_i];
}
|
c1183d55fbf5b5d87389dd6a0c951106b35620cf.cu
|
#include "includes.h"
/*
* Implements vector addition
*/
#ifdef DEBUG
#endif
__global__ void kern_vec_add_(float* x, float* y, float* r, size_t dim)
{
size_t _strd = blockDim.x * gridDim.x;
for(size_t _i = blockIdx.x * blockDim.x + threadIdx.x; _i < dim; _i += _strd)
r[_i] = x[_i] + y[_i];
}
|
ff242f40e48cb0dace0db70529c5ad41a83378c1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <mex.h>
#include <gpu/mxGPUArray.h>
#include "cuda/cudamx_cudnn.cuh"
#define MAX(a,b) ((a)>(b))? (a) : (b)
#define ERROR(msg,...) mexErrMsgIdAndTxt("mex3DConv:err", msg, ##__VA_ARGS__)
void ConvForward(const CudaMxArray5D& in, const CudaMxArray5D& filter,
const CudaMxArray2D& bias, const int* pad,
const int* stride, mxArray **out_ptr) {
// check input validity
int c1 = in.channel();
int c2 = filter.channel2();
int h = in.height();
int w = in.width();
int d = in.depth();
int fh = filter.height();
int fw = filter.width();
int fd = filter.depth();
if (c1 != filter.channel()) {
ERROR("(# input channels: %d) != (# filter channels: %d).",
c1, filter.channel());
}
if (c2 != bias.height()) {
ERROR("(# out channels: %d) != (# bias channels: %d).",
c2, bias.height());
}
cudnnHandle_t handle;
CHECKCUDNN(cudnnCreate(&handle));
cudnnTensorDescriptor_t input_desc, output_desc, bias_desc;
cudnnFilterDescriptor_t filter_desc;
cudnnConvolutionDescriptor_t conv_desc ;
CHECKCUDNN(cudnnCreateTensorDescriptor(&input_desc));
CHECKCUDNN(cudnnCreateTensorDescriptor(&output_desc));
CHECKCUDNN(cudnnCreateTensorDescriptor(&bias_desc)) ;
CHECKCUDNN(cudnnCreateFilterDescriptor(&filter_desc));
CHECKCUDNN(cudnnCreateConvolutionDescriptor(&conv_desc));
int in_dim[] = {1, c1, d, w, h};
int in_stride[] = {c1*d*w*h, d*w*h, w*h, h, 1};
int filter_dim[] = {c2, c1, fd, fw, fh};
int bias_dim[] ={1, c2, 1, 1, 1};
int bias_stride[] = {c2, 1, 1, 1};
CHECKCUDNN(cudnnSetTensorNdDescriptor(input_desc,
CUDNN_DATA_FLOAT, 5, in_dim, in_stride));
CHECKCUDNN(cudnnSetTensorNdDescriptor(bias_desc,
CUDNN_DATA_FLOAT, 5, bias_dim, bias_stride));
CHECKCUDNN(cudnnSetFilterNdDescriptor(filter_desc,
CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 5, filter_dim));
int dilate[] ={1,1,1};
CHECKCUDNN(cudnnSetConvolutionNdDescriptor(conv_desc,
3, pad, stride, dilate, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
int out_dim[5];
CHECKCUDNN(cudnnGetConvolutionNdForwardOutputDim(conv_desc,
input_desc, filter_desc, 5, out_dim));
if (c2 != out_dim[1]) {
ERROR("(# input out channels: %d) != (# conv out channels: %d).",
c2, out_dim[1]);
}
int oh = out_dim[4];
int ow = out_dim[3];
int od = out_dim[2];
int out_stride[] = {c2*od*ow*oh, od*ow*oh, ow*oh, oh, 1};
CHECKCUDNN(cudnnSetTensorNdDescriptor(output_desc,
CUDNN_DATA_FLOAT, 5, out_dim, out_stride));
cudnnConvolutionFwdAlgo_t conv_algo;
CHECKCUDNN(cudnnGetConvolutionForwardAlgorithm(handle,
input_desc, filter_desc, conv_desc, output_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv_algo));
size_t workspace_size = 0;
CHECKCUDNN(cudnnGetConvolutionForwardWorkspaceSize(handle,
input_desc, filter_desc, conv_desc, output_desc, conv_algo,
&workspace_size));
CudaMxArray5D out(oh,ow,od,c2,out_dim[0]);
out.Wrap(out_ptr);
void *d_workspace;
hipMalloc(&d_workspace, workspace_size);
float alpha = 1, beta = 0;
CHECKCUDNN(cudnnConvolutionForward(handle,
&alpha, input_desc, in.data(), filter_desc, filter.data(),
conv_desc, conv_algo, d_workspace, workspace_size, &beta,
output_desc, out.data()));
beta = 1.0;
CHECKCUDNN(cudnnAddTensor(handle,
&alpha, bias_desc, bias.data(), &beta, output_desc,
out.data()));
hipFree(d_workspace);
cudnnDestroyTensorDescriptor(input_desc);
cudnnDestroyTensorDescriptor(output_desc);
cudnnDestroyTensorDescriptor(bias_desc);
cudnnDestroyFilterDescriptor(filter_desc);
cudnnDestroyConvolutionDescriptor(conv_desc);
cudnnDestroy(handle);
out.Destroy();
}
void ConvBackward(const CudaMxArray5D& in, const CudaMxArray5D& filter,
const CudaMxArray2D& bias, const CudaMxArray5D& der_out,
const int* pad, const int* stride,
mxArray **der_in_ptr, mxArray **der_filter_ptr,
mxArray **der_bias_ptr) {
// check input validity
int c1 = in.channel();
int c2 = filter.channel2();
int h = in.height();
int w = in.width();
int d = in.depth();
int oh = der_out.height();
int ow = der_out.width();
int od = der_out.depth();
int fh = filter.height();
int fw = filter.width();
int fd = filter.depth();
if (c1 != filter.channel()) {
ERROR("(# input channels: %d) != (# filter channels: %d).",
c1, filter.channel());
}
if (c2 != bias.height()) {
ERROR("(# out channels: %d) != (# bias channels: %d).",
c2, bias.height());
}
cudnnHandle_t handle;
CHECKCUDNN(cudnnCreate(&handle));
cudnnTensorDescriptor_t der_out_desc, data_desc, bias_desc;
cudnnFilterDescriptor_t filter_desc;
cudnnConvolutionDescriptor_t conv_desc ;
CHECKCUDNN(cudnnCreateTensorDescriptor(&der_out_desc));
CHECKCUDNN(cudnnCreateTensorDescriptor(&data_desc));
CHECKCUDNN(cudnnCreateTensorDescriptor(&bias_desc));
CHECKCUDNN(cudnnCreateFilterDescriptor(&filter_desc));
CHECKCUDNN(cudnnCreateConvolutionDescriptor(&conv_desc));
int data_dim[] = {1, c1, d, w, h};
int data_stride[] = {c1*d*w*h, d*w*h, w*h, h, 1};
int der_out_dim[] = {1, c2, od, ow, oh};
int der_out_stride[] = {c2*od*ow*oh, od*ow*oh, ow*oh, oh, 1};
int filter_dim[] = {c2, c1, fd, fw, fh};
int bias_dim[] ={1, c2, 1, 1, 1};
int bias_stride[] = {c2, 1, 1, 1};
CHECKCUDNN(cudnnSetTensorNdDescriptor(data_desc,
CUDNN_DATA_FLOAT, 5, data_dim, data_stride));
CHECKCUDNN(cudnnSetTensorNdDescriptor(der_out_desc,
CUDNN_DATA_FLOAT, 5, der_out_dim, der_out_stride));
CHECKCUDNN(cudnnSetTensorNdDescriptor(bias_desc,
CUDNN_DATA_FLOAT, 5, bias_dim, bias_stride));
CHECKCUDNN(cudnnSetFilterNdDescriptor(filter_desc,
CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 5, filter_dim));
int dilate[]={1,1,1};
CHECKCUDNN(cudnnSetConvolutionNdDescriptor(conv_desc,
3, pad, stride, dilate, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
cudnnConvolutionBwdFilterAlgo_t conv_filter_algo;
CHECKCUDNN(cudnnGetConvolutionBackwardFilterAlgorithm(handle,
data_desc, der_out_desc, conv_desc, filter_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &conv_filter_algo));
cudnnConvolutionBwdDataAlgo_t conv_data_algo;
CHECKCUDNN(cudnnGetConvolutionBackwardDataAlgorithm(handle,
filter_desc, der_out_desc, conv_desc, data_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &conv_data_algo));
size_t workspace_filter_size = 0, workspace_data_size = 0;
CHECKCUDNN(cudnnGetConvolutionBackwardFilterWorkspaceSize(handle,
data_desc, der_out_desc, conv_desc, filter_desc, conv_filter_algo,
&workspace_filter_size));
CHECKCUDNN(cudnnGetConvolutionBackwardDataWorkspaceSize(handle,
filter_desc, der_out_desc, conv_desc, data_desc, conv_data_algo,
&workspace_data_size));
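// One scratch buffer is shared by the filter- and data-backward passes below,
// so it is sized for the larger of the two workspace requirements.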
size_t workspace_size = MAX(workspace_filter_size,
workspace_data_size);
void *d_workspace;
hipMalloc(&d_workspace, workspace_size);
CudaMxArray5D der_in(h,w,d,c1,1);
CudaMxArray5D der_filter(fh,fw,fd,c1,c2);
CudaMxArray2D der_bias(c2,1);
der_in.Wrap(der_in_ptr);
der_filter.Wrap(der_filter_ptr);
der_bias.Wrap(der_bias_ptr);
float alpha = 1, beta = 0;
// derBias
CHECKCUDNN(cudnnConvolutionBackwardBias(handle,
&alpha, der_out_desc, der_out.data(), &beta, bias_desc, der_bias.data()));
// derFilter
CHECKCUDNN(cudnnConvolutionBackwardFilter(handle,
&alpha, data_desc, in.data(), der_out_desc, der_out.data(),
conv_desc, conv_filter_algo, d_workspace, workspace_size, &beta,
filter_desc, der_filter.data()));
// derInput
CHECKCUDNN(cudnnConvolutionBackwardData(handle,
&alpha, filter_desc, filter.data(), der_out_desc, der_out.data(),
conv_desc, conv_data_algo, d_workspace, workspace_size, &beta,
data_desc, der_in.data()));
hipFree(d_workspace);
cudnnDestroyTensorDescriptor(data_desc);
cudnnDestroyTensorDescriptor(der_out_desc);
cudnnDestroyTensorDescriptor(bias_desc);
cudnnDestroyFilterDescriptor(filter_desc);
cudnnDestroyConvolutionDescriptor(conv_desc);
cudnnDestroy(handle);
der_in.Destroy();
der_filter.Destroy();
der_bias.Destroy();
}
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
mxInitGPU();
if (nrhs == 5) { // FORWARD
CudaMxArray5D in(prhs[0]);
CudaMxArray5D filter(prhs[1]);
CudaMxArray2D bias(prhs[2]);
const int *pad = (int*)mxGetData(prhs[3]);
const int *stride = (int*)mxGetData(prhs[4]);
int pad_rev[] = {pad[2], pad[1], pad[0]};
int stride_rev[] = {stride[2], stride[1], stride[0]};
ConvForward(in, filter, bias, pad_rev, stride_rev, &plhs[0]);
in.Destroy();
filter.Destroy();
bias.Destroy();
} else if (nrhs == 6) { // BACKWARD
CudaMxArray5D in(prhs[0]);
CudaMxArray5D filter(prhs[1]);
CudaMxArray2D bias(prhs[2]);
CudaMxArray5D der_out(prhs[3]);
const int *pad = (int*)mxGetData(prhs[4]);
const int *stride = (int*)mxGetData(prhs[5]);
int pad_rev[] = {pad[2], pad[1], pad[0]};
int stride_rev[] = {stride[2], stride[1], stride[0]};
ConvBackward(in, filter, bias, der_out, pad_rev, stride_rev, &plhs[0],
&plhs[1], &plhs[2]); // der_in, der_filter, der_bias
in.Destroy();
der_out.Destroy();
filter.Destroy();
bias.Destroy();
} else {
ERROR("invalid number of input parameters.");
}
}
|
ff242f40e48cb0dace0db70529c5ad41a83378c1.cu
|
#include <mex.h>
#include <gpu/mxGPUArray.h>
#include "cuda/cudamx_cudnn.cuh"
#define MAX(a,b) ((a)>(b))? (a) : (b)
#define ERROR(msg,...) mexErrMsgIdAndTxt("mex3DConv:err", msg, ##__VA_ARGS__)
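// MATLAB gpuArrays are column-major (height varies fastest), so the cuDNN tensor
// descriptors below list the dimensions as {N, C, D, W, H} with explicit strides
// instead of relying on a packed NCHW layout; for the same reason mexFunction
// reverses the 3-element pad/stride vectors before passing them in.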
void ConvForward(const CudaMxArray5D& in, const CudaMxArray5D& filter,
const CudaMxArray2D& bias, const int* pad,
const int* stride, mxArray **out_ptr) {
// check input validity
int c1 = in.channel();
int c2 = filter.channel2();
int h = in.height();
int w = in.width();
int d = in.depth();
int fh = filter.height();
int fw = filter.width();
int fd = filter.depth();
if (c1 != filter.channel()) {
ERROR("(# input channels: %d) != (# filter channels: %d).",
c1, filter.channel());
}
if (c2 != bias.height()) {
ERROR("(# out channels: %d) != (# bias channels: %d).",
c2, bias.height());
}
cudnnHandle_t handle;
CHECKCUDNN(cudnnCreate(&handle));
cudnnTensorDescriptor_t input_desc, output_desc, bias_desc;
cudnnFilterDescriptor_t filter_desc;
cudnnConvolutionDescriptor_t conv_desc ;
CHECKCUDNN(cudnnCreateTensorDescriptor(&input_desc));
CHECKCUDNN(cudnnCreateTensorDescriptor(&output_desc));
CHECKCUDNN(cudnnCreateTensorDescriptor(&bias_desc)) ;
CHECKCUDNN(cudnnCreateFilterDescriptor(&filter_desc));
CHECKCUDNN(cudnnCreateConvolutionDescriptor(&conv_desc));
int in_dim[] = {1, c1, d, w, h};
int in_stride[] = {c1*d*w*h, d*w*h, w*h, h, 1};
int filter_dim[] = {c2, c1, fd, fw, fh};
int bias_dim[] ={1, c2, 1, 1, 1};
  int bias_stride[] = {c2, 1, 1, 1, 1};
CHECKCUDNN(cudnnSetTensorNdDescriptor(input_desc,
CUDNN_DATA_FLOAT, 5, in_dim, in_stride));
CHECKCUDNN(cudnnSetTensorNdDescriptor(bias_desc,
CUDNN_DATA_FLOAT, 5, bias_dim, bias_stride));
CHECKCUDNN(cudnnSetFilterNdDescriptor(filter_desc,
CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 5, filter_dim));
int dilate[] ={1,1,1};
CHECKCUDNN(cudnnSetConvolutionNdDescriptor(conv_desc,
3, pad, stride, dilate, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
int out_dim[5];
CHECKCUDNN(cudnnGetConvolutionNdForwardOutputDim(conv_desc,
input_desc, filter_desc, 5, out_dim));
if (c2 != out_dim[1]) {
ERROR("(# input out channels: %d) != (# conv out channels: %d).",
c2, out_dim[1]);
}
int oh = out_dim[4];
int ow = out_dim[3];
int od = out_dim[2];
int out_stride[] = {c2*od*ow*oh, od*ow*oh, ow*oh, oh, 1};
CHECKCUDNN(cudnnSetTensorNdDescriptor(output_desc,
CUDNN_DATA_FLOAT, 5, out_dim, out_stride));
cudnnConvolutionFwdAlgo_t conv_algo;
CHECKCUDNN(cudnnGetConvolutionForwardAlgorithm(handle,
input_desc, filter_desc, conv_desc, output_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv_algo));
size_t workspace_size = 0;
CHECKCUDNN(cudnnGetConvolutionForwardWorkspaceSize(handle,
input_desc, filter_desc, conv_desc, output_desc, conv_algo,
&workspace_size));
CudaMxArray5D out(oh,ow,od,c2,out_dim[0]);
out.Wrap(out_ptr);
void *d_workspace;
cudaMalloc(&d_workspace, workspace_size);
float alpha = 1, beta = 0;
CHECKCUDNN(cudnnConvolutionForward(handle,
&alpha, input_desc, in.data(), filter_desc, filter.data(),
conv_desc, conv_algo, d_workspace, workspace_size, &beta,
output_desc, out.data()));
beta = 1.0;
CHECKCUDNN(cudnnAddTensor(handle,
&alpha, bias_desc, bias.data(), &beta, output_desc,
out.data()));
cudaFree(d_workspace);
cudnnDestroyTensorDescriptor(input_desc);
cudnnDestroyTensorDescriptor(output_desc);
cudnnDestroyTensorDescriptor(bias_desc);
cudnnDestroyFilterDescriptor(filter_desc);
cudnnDestroyConvolutionDescriptor(conv_desc);
cudnnDestroy(handle);
out.Destroy();
}
void ConvBackward(const CudaMxArray5D& in, const CudaMxArray5D& filter,
const CudaMxArray2D& bias, const CudaMxArray5D& der_out,
const int* pad, const int* stride,
mxArray **der_in_ptr, mxArray **der_filter_ptr,
mxArray **der_bias_ptr) {
// check input validity
int c1 = in.channel();
int c2 = filter.channel2();
int h = in.height();
int w = in.width();
int d = in.depth();
int oh = der_out.height();
int ow = der_out.width();
int od = der_out.depth();
int fh = filter.height();
int fw = filter.width();
int fd = filter.depth();
if (c1 != filter.channel()) {
ERROR("(# input channels: %d) != (# filter channels: %d).",
c1, filter.channel());
}
if (c2 != bias.height()) {
ERROR("(# out channels: %d) != (# bias channels: %d).",
c2, bias.height());
}
cudnnHandle_t handle;
CHECKCUDNN(cudnnCreate(&handle));
cudnnTensorDescriptor_t der_out_desc, data_desc, bias_desc;
cudnnFilterDescriptor_t filter_desc;
cudnnConvolutionDescriptor_t conv_desc ;
CHECKCUDNN(cudnnCreateTensorDescriptor(&der_out_desc));
CHECKCUDNN(cudnnCreateTensorDescriptor(&data_desc));
CHECKCUDNN(cudnnCreateTensorDescriptor(&bias_desc));
CHECKCUDNN(cudnnCreateFilterDescriptor(&filter_desc));
CHECKCUDNN(cudnnCreateConvolutionDescriptor(&conv_desc));
int data_dim[] = {1, c1, d, w, h};
int data_stride[] = {c1*d*w*h, d*w*h, w*h, h, 1};
int der_out_dim[] = {1, c2, od, ow, oh};
int der_out_stride[] = {c2*od*ow*oh, od*ow*oh, ow*oh, oh, 1};
int filter_dim[] = {c2, c1, fd, fw, fh};
int bias_dim[] ={1, c2, 1, 1, 1};
  int bias_stride[] = {c2, 1, 1, 1, 1};
CHECKCUDNN(cudnnSetTensorNdDescriptor(data_desc,
CUDNN_DATA_FLOAT, 5, data_dim, data_stride));
CHECKCUDNN(cudnnSetTensorNdDescriptor(der_out_desc,
CUDNN_DATA_FLOAT, 5, der_out_dim, der_out_stride));
CHECKCUDNN(cudnnSetTensorNdDescriptor(bias_desc,
CUDNN_DATA_FLOAT, 5, bias_dim, bias_stride));
CHECKCUDNN(cudnnSetFilterNdDescriptor(filter_desc,
CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 5, filter_dim));
int dilate[]={1,1,1};
CHECKCUDNN(cudnnSetConvolutionNdDescriptor(conv_desc,
3, pad, stride, dilate, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
cudnnConvolutionBwdFilterAlgo_t conv_filter_algo;
CHECKCUDNN(cudnnGetConvolutionBackwardFilterAlgorithm(handle,
data_desc, der_out_desc, conv_desc, filter_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &conv_filter_algo));
cudnnConvolutionBwdDataAlgo_t conv_data_algo;
CHECKCUDNN(cudnnGetConvolutionBackwardDataAlgorithm(handle,
filter_desc, der_out_desc, conv_desc, data_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &conv_data_algo));
size_t workspace_filter_size = 0, workspace_data_size = 0;
CHECKCUDNN(cudnnGetConvolutionBackwardFilterWorkspaceSize(handle,
data_desc, der_out_desc, conv_desc, filter_desc, conv_filter_algo,
&workspace_filter_size));
CHECKCUDNN(cudnnGetConvolutionBackwardDataWorkspaceSize(handle,
filter_desc, der_out_desc, conv_desc, data_desc, conv_data_algo,
&workspace_data_size));
size_t workspace_size = MAX(workspace_filter_size,
workspace_data_size);
void *d_workspace;
cudaMalloc(&d_workspace, workspace_size);
CudaMxArray5D der_in(h,w,d,c1,1);
CudaMxArray5D der_filter(fh,fw,fd,c1,c2);
CudaMxArray2D der_bias(c2,1);
der_in.Wrap(der_in_ptr);
der_filter.Wrap(der_filter_ptr);
der_bias.Wrap(der_bias_ptr);
float alpha = 1, beta = 0;
// derBias
CHECKCUDNN(cudnnConvolutionBackwardBias(handle,
&alpha, der_out_desc, der_out.data(), &beta, bias_desc, der_bias.data()));
// derFilter
CHECKCUDNN(cudnnConvolutionBackwardFilter(handle,
&alpha, data_desc, in.data(), der_out_desc, der_out.data(),
conv_desc, conv_filter_algo, d_workspace, workspace_size, &beta,
filter_desc, der_filter.data()));
// derInput
CHECKCUDNN(cudnnConvolutionBackwardData(handle,
&alpha, filter_desc, filter.data(), der_out_desc, der_out.data(),
conv_desc, conv_data_algo, d_workspace, workspace_size, &beta,
data_desc, der_in.data()));
cudaFree(d_workspace);
cudnnDestroyTensorDescriptor(data_desc);
cudnnDestroyTensorDescriptor(der_out_desc);
cudnnDestroyTensorDescriptor(bias_desc);
cudnnDestroyFilterDescriptor(filter_desc);
cudnnDestroyConvolutionDescriptor(conv_desc);
cudnnDestroy(handle);
der_in.Destroy();
der_filter.Destroy();
der_bias.Destroy();
}
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
mxInitGPU();
if (nrhs == 5) { // FORWARD
CudaMxArray5D in(prhs[0]);
CudaMxArray5D filter(prhs[1]);
CudaMxArray2D bias(prhs[2]);
const int *pad = (int*)mxGetData(prhs[3]);
const int *stride = (int*)mxGetData(prhs[4]);
int pad_rev[] = {pad[2], pad[1], pad[0]};
int stride_rev[] = {stride[2], stride[1], stride[0]};
ConvForward(in, filter, bias, pad_rev, stride_rev, &plhs[0]);
in.Destroy();
filter.Destroy();
bias.Destroy();
} else if (nrhs == 6) { // BACKWARD
CudaMxArray5D in(prhs[0]);
CudaMxArray5D filter(prhs[1]);
CudaMxArray2D bias(prhs[2]);
CudaMxArray5D der_out(prhs[3]);
const int *pad = (int*)mxGetData(prhs[4]);
const int *stride = (int*)mxGetData(prhs[5]);
int pad_rev[] = {pad[2], pad[1], pad[0]};
int stride_rev[] = {stride[2], stride[1], stride[0]};
ConvBackward(in, filter, bias, der_out, pad_rev, stride_rev, &plhs[0],
&plhs[1], &plhs[2]); // der_in, der_filter, der_bias
in.Destroy();
der_out.Destroy();
filter.Destroy();
bias.Destroy();
} else {
ERROR("invalid number of input parameters.");
}
}
|
c413e7b34de0430dea7912c9ab4e4f8e23036e5a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <helper_timer.h>
#include "../matrix/matrix.h"
#include "../matrix/csr.h"
#include "../matrix/ellpack.h"
#include "../io/iomanager.h"
#include "../utils/utils.h"
unsigned int vm_thr_block = 512, scalar_thr_block = 512, n_blocks_vm, n_blocks_scalar;
void cudaCheckError(int line) {
hipError_t e = hipGetLastError();
if(e != hipSuccess) {
printf("Cuda failure %s:%d: '%s'\n", __FILE__, line, hipGetErrorString(e));
}
}
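// CSR "scalar" SpMV kernel: one thread per row. Each thread walks its row segment
// irp[i]..irp[i+1] and accumulates as[j] * x[ja[j]] into y[i].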
__global__ void scalarCSR(int * m, int * irp, int * ja, double * as, double * x, double * y) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < *m) {
double temp = 0.0;
for (int j = irp[i]; j < irp[i + 1]; ++j) {
temp += as[j] * x[ja[j]];
}
y[i] = temp;
}
}
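// Warp-level tree reduction over the shared-memory partial sums. sdata is declared
// volatile and no __syncthreads() is used between steps, so the code relies on
// implicit intra-warp lock-step execution of the participating lanes.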
__device__ void warpReduce(volatile double *sdata, int &tid, int &lane, int &warp_size) {
if (warp_size == 32) { if (lane < 16) { sdata[tid] += sdata[tid + 16]; } }
if (lane < 8) { sdata[tid] += sdata[tid + 8]; }
if (lane < 4) { sdata[tid] += sdata[tid + 4]; }
if (lane < 2) { sdata[tid] += sdata[tid + 2]; }
if (lane < 1) { sdata[tid] += sdata[tid + 1]; }
}
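// CSR "vector" SpMV kernel: one warp (of warp_size threads) per row. Each lane strides
// over the row's nonzeros, the partial sums are combined with warpReduce, and lane 0
// adds the warp's result into y[row]. Shared memory holds one double per thread.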
__global__ void vectorMiningCSR(int * m, int * d_warp_size, int * irp, int * ja, double * as, double * x, double * y) {
extern __shared__ double sdata[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
int warp_size = *d_warp_size;
int warp = i / warp_size;
int lane = i & (warp_size - 1);
int row = warp;
sdata[tid] = 0;
if (row < *m) {
for (int j = irp[row] + lane ; j < irp[row + 1]; j += warp_size)
sdata[tid] += as[j] * x[ja[j]];
warpReduce(sdata, tid, lane, warp_size);
if (lane == 0)
y[row] += sdata[tid];
}
}
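// ELLPACK SpMV kernel: one thread per row. ja/as are stored row-major with a fixed
// width of maxnz entries per row, so row i reads elements i*maxnz .. i*maxnz+maxnz-1.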
__global__ void scalarEllpack(int * m, int * ja, double * as, double * x, double * y, int * maxnz) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int rows = *m, maximum_nz = *maxnz;
if (i < rows) {
double temp = 0.0;
for (unsigned int j = 0; j < *maxnz; ++j) {
temp += as[i * maximum_nz + j] * x[ja[i * maximum_nz + j]];
}
y[i] = temp;
}
}
void allocateCSR(CSR * &csr, int * &irp, int * &ja, double * &as, double * &x, double *&y, int &m, int &n) {
int nz = csr->getnz();
hipMalloc((void**)&irp, sizeof(int) * (n + 1));
hipMalloc((void**)&ja, sizeof(int) * nz);
hipMalloc((void**)&as, sizeof(double) * nz);
hipMalloc((void**)&x, sizeof(double) * n);
hipMalloc((void**)&y, sizeof(double) * n);
hipMemcpy(irp, csr->irp, sizeof(int) * (n + 1), hipMemcpyHostToDevice);
hipMemcpy(ja, csr->getja(), sizeof(int) * nz, hipMemcpyHostToDevice);
hipMemcpy(as, csr->getas(), sizeof(double) * nz, hipMemcpyHostToDevice);
hipMemcpy(x, csr->getX(), sizeof(double) * n, hipMemcpyHostToDevice);
hipMemcpy(y, csr->y, sizeof(double) * n, hipMemcpyHostToDevice);
}
void allocateEllpack(Ellpack * &ellpack, int * &ja, double * &as, double * &x, double * &y, int * &maxnz, int &m, int &n) {
long long int host_maxnz = ellpack->getmaxnz();
long long int rows = m;
int * host_ja = ellpack->get1Dja();
double * host_as = ellpack->get1Das();
hipMalloc((void**)&ja, sizeof(int) * rows * host_maxnz);
hipMalloc((void**)&as, sizeof(double) * rows * host_maxnz);
hipMalloc((void**)&x, sizeof(double) * rows);
hipMalloc((void**)&y, sizeof(double) * rows);
hipMalloc((void**)&maxnz, sizeof(int));
hipMemcpy(ja, host_ja, sizeof(int) * rows * host_maxnz, hipMemcpyHostToDevice);
hipMemcpy(as, host_as, sizeof(double) * rows * host_maxnz, hipMemcpyHostToDevice);
hipMemcpy(x, ellpack->getX(), sizeof(double) * rows, hipMemcpyHostToDevice);
hipMemcpy(y, ellpack->y, sizeof(double) * m, hipMemcpyHostToDevice);
hipMemcpy(maxnz, &host_maxnz, sizeof(int), hipMemcpyHostToDevice);
}
void deallocateCSR(int * &irp, int * &ja, double * &as, double * &x, double *&y) {
hipFree(irp);
hipFree(ja);
hipFree(as);
hipFree(x);
hipFree(y);
}
void deallocateEllpack(int * &ja, double * &as, double * &x, double * &y, int * &maxnz) {
hipFree(ja);
hipFree(as);
hipFree(x);
hipFree(y);
hipFree(maxnz);
}
void collectResults(CSR * &csr, Ellpack * &ellpack, double * &csr_y, double * &ellpack_y, int &n) {
hipMemcpy(csr->y, csr_y, sizeof(double) * n, hipMemcpyDeviceToHost);
hipMemcpy(ellpack->y, ellpack_y, sizeof(double) * n, hipMemcpyDeviceToHost);
}
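// Chooses grid sizes for the scalar and vector kernels. If the vector kernel would need
// more than MAX_N_BLOCKS blocks, it first doubles the threads per block and then, if the
// grid is still too large, halves the warp size used per row.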
void getBlockNumbers(int m, int &warp_size) {
n_blocks_scalar = m / scalar_thr_block;
if (m % scalar_thr_block > 0.0) {
n_blocks_scalar++;
}
n_blocks_vm = (m * warp_size) / vm_thr_block;
if ((m * warp_size) % vm_thr_block > 0.0) {
n_blocks_vm++;
}
if (n_blocks_vm > MAX_N_BLOCKS) {
vm_thr_block *= 2;
n_blocks_vm = (m * warp_size) / vm_thr_block;
if ((m * warp_size) % vm_thr_block > 0.0) {
n_blocks_vm++;
}
}
if (n_blocks_vm > MAX_N_BLOCKS) {
warp_size /= 2;
n_blocks_vm = (m * warp_size) / vm_thr_block;
if ((m * warp_size) % vm_thr_block > 0.0) {
n_blocks_vm++;
}
}
}
void solveCuda(IOmanager * io, std::string path, CSR * &csr, Ellpack * &ellpack) {
int m = csr->getRows();
int n = csr->getCols();
int warp_size = 32, *d_warp_size;
StopWatchInterface* timer = 0;
sdkCreateTimer(&timer);
getBlockNumbers(m, warp_size);
const int shmem_size = vm_thr_block * sizeof(double);
int * csr_irp, * csr_ja, * ellpack_ja, * maxnz, * rows;
double * csr_as, * csr_x, * csr_y, * ellpack_as, * ellpack_x, * ellpack_y;
if (csr->fitsInMemory()) {
allocateCSR(csr, csr_irp, csr_ja, csr_as, csr_x, csr_y, m, n);
}
if (ellpack->fitsInMemory()) {
allocateEllpack(ellpack, ellpack_ja, ellpack_as, ellpack_x, ellpack_y, maxnz, m, n);
}
cudaCheckError(__LINE__);
hipMalloc((void**)&rows, sizeof(int));
hipMemcpy(rows, &m, sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void**)&d_warp_size, sizeof(int));
hipMemcpy(d_warp_size, &warp_size, sizeof(int), hipMemcpyHostToDevice);
cudaCheckError(__LINE__);
for (int k = 0; k < NR_RUNS + 1; ++k) {
if (csr->fitsInMemory()) {
timer->start();
hipLaunchKernelGGL(( scalarCSR), dim3(n_blocks_scalar), dim3(scalar_thr_block), 0, 0, rows, csr_irp, csr_ja, csr_as, csr_x, csr_y);
timer->stop();
csr->trackCSRTime(SCALAR, timer->getTime());
timer->reset();
hipMemset(csr_y, 0.0, sizeof(double) * m);
cudaCheckError(__LINE__);
timer->start();
hipLaunchKernelGGL(( vectorMiningCSR), dim3(n_blocks_vm), dim3(vm_thr_block), shmem_size, 0, rows, d_warp_size, csr_irp, csr_ja, csr_as, csr_x, csr_y);
timer->stop();
csr->trackCSRTime(VECTOR_MINING, timer->getTime());
timer->reset();
cudaCheckError(__LINE__);
}
if (ellpack->fitsInMemory()) {
timer->start();
hipLaunchKernelGGL(( scalarEllpack), dim3(n_blocks_scalar), dim3(scalar_thr_block), 0, 0, rows, ellpack_ja, ellpack_as, ellpack_x, ellpack_y, maxnz);
timer->stop();
ellpack->trackTime(timer->getTime());
timer->reset();
cudaCheckError(__LINE__);
}
if (k != NR_RUNS) {
if (csr->fitsInMemory()) hipMemset(csr_y, 0.0, sizeof(double) * m);
if (ellpack->fitsInMemory()) hipMemset(ellpack_y, 0.0, sizeof(double) * m);
}
}
collectResults(csr, ellpack, csr_y, ellpack_y, n);
cudaCheckError(__LINE__);
deallocateCSR(csr_irp, csr_ja, csr_as, csr_x, csr_y);
deallocateEllpack(ellpack_ja, ellpack_as, ellpack_x, ellpack_y, maxnz);
cudaCheckError(__LINE__);
io->exportResults(CUDA, path, csr, ellpack);
}
|
c413e7b34de0430dea7912c9ab4e4f8e23036e5a.cu
|
#include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <helper_timer.h>
#include "../matrix/matrix.h"
#include "../matrix/csr.h"
#include "../matrix/ellpack.h"
#include "../io/iomanager.h"
#include "../utils/utils.h"
unsigned int vm_thr_block = 512, scalar_thr_block = 512, n_blocks_vm, n_blocks_scalar;
void cudaCheckError(int line) {
cudaError_t e = cudaGetLastError();
if(e != cudaSuccess) {
printf("Cuda failure %s:%d: '%s'\n", __FILE__, line, cudaGetErrorString(e));
}
}
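// CSR "scalar" SpMV kernel: one thread per row. Each thread walks its row segment
// irp[i]..irp[i+1] and accumulates as[j] * x[ja[j]] into y[i].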
__global__ void scalarCSR(int * m, int * irp, int * ja, double * as, double * x, double * y) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < *m) {
double temp = 0.0;
for (int j = irp[i]; j < irp[i + 1]; ++j) {
temp += as[j] * x[ja[j]];
}
y[i] = temp;
}
}
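// Warp-level tree reduction over the shared-memory partial sums. sdata is declared
// volatile and no __syncthreads() is used between steps, so the code relies on
// implicit intra-warp lock-step execution of the participating lanes.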
__device__ void warpReduce(volatile double *sdata, int &tid, int &lane, int &warp_size) {
if (warp_size == 32) { if (lane < 16) { sdata[tid] += sdata[tid + 16]; } }
if (lane < 8) { sdata[tid] += sdata[tid + 8]; }
if (lane < 4) { sdata[tid] += sdata[tid + 4]; }
if (lane < 2) { sdata[tid] += sdata[tid + 2]; }
if (lane < 1) { sdata[tid] += sdata[tid + 1]; }
}
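// CSR "vector" SpMV kernel: one warp (of warp_size threads) per row. Each lane strides
// over the row's nonzeros, the partial sums are combined with warpReduce, and lane 0
// adds the warp's result into y[row]. Shared memory holds one double per thread.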
__global__ void vectorMiningCSR(int * m, int * d_warp_size, int * irp, int * ja, double * as, double * x, double * y) {
extern __shared__ double sdata[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
int warp_size = *d_warp_size;
int warp = i / warp_size;
int lane = i & (warp_size - 1);
int row = warp;
sdata[tid] = 0;
if (row < *m) {
for (int j = irp[row] + lane ; j < irp[row + 1]; j += warp_size)
sdata[tid] += as[j] * x[ja[j]];
warpReduce(sdata, tid, lane, warp_size);
if (lane == 0)
y[row] += sdata[tid];
}
}
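// ELLPACK SpMV kernel: one thread per row. ja/as are stored row-major with a fixed
// width of maxnz entries per row, so row i reads elements i*maxnz .. i*maxnz+maxnz-1.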
__global__ void scalarEllpack(int * m, int * ja, double * as, double * x, double * y, int * maxnz) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int rows = *m, maximum_nz = *maxnz;
if (i < rows) {
double temp = 0.0;
for (unsigned int j = 0; j < *maxnz; ++j) {
temp += as[i * maximum_nz + j] * x[ja[i * maximum_nz + j]];
}
y[i] = temp;
}
}
void allocateCSR(CSR * &csr, int * &irp, int * &ja, double * &as, double * &x, double *&y, int &m, int &n) {
int nz = csr->getnz();
cudaMalloc((void**)&irp, sizeof(int) * (n + 1));
cudaMalloc((void**)&ja, sizeof(int) * nz);
cudaMalloc((void**)&as, sizeof(double) * nz);
cudaMalloc((void**)&x, sizeof(double) * n);
cudaMalloc((void**)&y, sizeof(double) * n);
cudaMemcpy(irp, csr->irp, sizeof(int) * (n + 1), cudaMemcpyHostToDevice);
cudaMemcpy(ja, csr->getja(), sizeof(int) * nz, cudaMemcpyHostToDevice);
cudaMemcpy(as, csr->getas(), sizeof(double) * nz, cudaMemcpyHostToDevice);
cudaMemcpy(x, csr->getX(), sizeof(double) * n, cudaMemcpyHostToDevice);
cudaMemcpy(y, csr->y, sizeof(double) * n, cudaMemcpyHostToDevice);
}
void allocateEllpack(Ellpack * &ellpack, int * &ja, double * &as, double * &x, double * &y, int * &maxnz, int &m, int &n) {
long long int host_maxnz = ellpack->getmaxnz();
long long int rows = m;
int * host_ja = ellpack->get1Dja();
double * host_as = ellpack->get1Das();
cudaMalloc((void**)&ja, sizeof(int) * rows * host_maxnz);
cudaMalloc((void**)&as, sizeof(double) * rows * host_maxnz);
cudaMalloc((void**)&x, sizeof(double) * rows);
cudaMalloc((void**)&y, sizeof(double) * rows);
cudaMalloc((void**)&maxnz, sizeof(int));
cudaMemcpy(ja, host_ja, sizeof(int) * rows * host_maxnz, cudaMemcpyHostToDevice);
cudaMemcpy(as, host_as, sizeof(double) * rows * host_maxnz, cudaMemcpyHostToDevice);
cudaMemcpy(x, ellpack->getX(), sizeof(double) * rows, cudaMemcpyHostToDevice);
cudaMemcpy(y, ellpack->y, sizeof(double) * m, cudaMemcpyHostToDevice);
cudaMemcpy(maxnz, &host_maxnz, sizeof(int), cudaMemcpyHostToDevice);
}
void deallocateCSR(int * &irp, int * &ja, double * &as, double * &x, double *&y) {
cudaFree(irp);
cudaFree(ja);
cudaFree(as);
cudaFree(x);
cudaFree(y);
}
void deallocateEllpack(int * &ja, double * &as, double * &x, double * &y, int * &maxnz) {
cudaFree(ja);
cudaFree(as);
cudaFree(x);
cudaFree(y);
cudaFree(maxnz);
}
void collectResults(CSR * &csr, Ellpack * &ellpack, double * &csr_y, double * &ellpack_y, int &n) {
cudaMemcpy(csr->y, csr_y, sizeof(double) * n, cudaMemcpyDeviceToHost);
cudaMemcpy(ellpack->y, ellpack_y, sizeof(double) * n, cudaMemcpyDeviceToHost);
}
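// Chooses grid sizes for the scalar and vector kernels. If the vector kernel would need
// more than MAX_N_BLOCKS blocks, it first doubles the threads per block and then, if the
// grid is still too large, halves the warp size used per row.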
void getBlockNumbers(int m, int &warp_size) {
n_blocks_scalar = m / scalar_thr_block;
if (m % scalar_thr_block > 0.0) {
n_blocks_scalar++;
}
n_blocks_vm = (m * warp_size) / vm_thr_block;
if ((m * warp_size) % vm_thr_block > 0.0) {
n_blocks_vm++;
}
if (n_blocks_vm > MAX_N_BLOCKS) {
vm_thr_block *= 2;
n_blocks_vm = (m * warp_size) / vm_thr_block;
if ((m * warp_size) % vm_thr_block > 0.0) {
n_blocks_vm++;
}
}
if (n_blocks_vm > MAX_N_BLOCKS) {
warp_size /= 2;
n_blocks_vm = (m * warp_size) / vm_thr_block;
if ((m * warp_size) % vm_thr_block > 0.0) {
n_blocks_vm++;
}
}
}
void solveCuda(IOmanager * io, std::string path, CSR * &csr, Ellpack * &ellpack) {
int m = csr->getRows();
int n = csr->getCols();
int warp_size = 32, *d_warp_size;
StopWatchInterface* timer = 0;
sdkCreateTimer(&timer);
getBlockNumbers(m, warp_size);
const int shmem_size = vm_thr_block * sizeof(double);
int * csr_irp, * csr_ja, * ellpack_ja, * maxnz, * rows;
double * csr_as, * csr_x, * csr_y, * ellpack_as, * ellpack_x, * ellpack_y;
if (csr->fitsInMemory()) {
allocateCSR(csr, csr_irp, csr_ja, csr_as, csr_x, csr_y, m, n);
}
if (ellpack->fitsInMemory()) {
allocateEllpack(ellpack, ellpack_ja, ellpack_as, ellpack_x, ellpack_y, maxnz, m, n);
}
cudaCheckError(__LINE__);
cudaMalloc((void**)&rows, sizeof(int));
cudaMemcpy(rows, &m, sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_warp_size, sizeof(int));
cudaMemcpy(d_warp_size, &warp_size, sizeof(int), cudaMemcpyHostToDevice);
cudaCheckError(__LINE__);
for (int k = 0; k < NR_RUNS + 1; ++k) {
if (csr->fitsInMemory()) {
timer->start();
scalarCSR<<<n_blocks_scalar, scalar_thr_block>>>(rows, csr_irp, csr_ja, csr_as, csr_x, csr_y);
timer->stop();
csr->trackCSRTime(SCALAR, timer->getTime());
timer->reset();
cudaMemset(csr_y, 0.0, sizeof(double) * m);
cudaCheckError(__LINE__);
timer->start();
vectorMiningCSR<<<n_blocks_vm, vm_thr_block, shmem_size>>>(rows, d_warp_size, csr_irp, csr_ja, csr_as, csr_x, csr_y);
timer->stop();
csr->trackCSRTime(VECTOR_MINING, timer->getTime());
timer->reset();
cudaCheckError(__LINE__);
}
if (ellpack->fitsInMemory()) {
timer->start();
scalarEllpack<<<n_blocks_scalar, scalar_thr_block>>>(rows, ellpack_ja, ellpack_as, ellpack_x, ellpack_y, maxnz);
timer->stop();
ellpack->trackTime(timer->getTime());
timer->reset();
cudaCheckError(__LINE__);
}
if (k != NR_RUNS) {
if (csr->fitsInMemory()) cudaMemset(csr_y, 0.0, sizeof(double) * m);
if (ellpack->fitsInMemory()) cudaMemset(ellpack_y, 0.0, sizeof(double) * m);
}
}
collectResults(csr, ellpack, csr_y, ellpack_y, n);
cudaCheckError(__LINE__);
deallocateCSR(csr_irp, csr_ja, csr_as, csr_x, csr_y);
deallocateEllpack(ellpack_ja, ellpack_as, ellpack_x, ellpack_y, maxnz);
cudaCheckError(__LINE__);
io->exportResults(CUDA, path, csr, ellpack);
}
|
0a21ea58e6b95d3b6c834a6bae007a4aea565159.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* FullyConnectedLayer.cpp
*
* Created on: 2016. 5. 10.
* Author: jhkim
*/
#include "hip/hip_runtime.h"
#include <algorithm>
#include "FullyConnectedLayer.h"
#include "MathFunctions.h"
#include "Util.h"
#include "Network.h"
#include "SysLog.h"
#include "StdOutLog.h"
#include "PropMgmt.h"
#include "Update.h"
#include "Updater.h"
#include "Donator.h"
#include "frcnn_common.h"
#define FULLYCONNECTEDLAYER_LOG 0
using namespace std;
///////////////////////////////////////////////////////////////////////////////////////////
// GPU Kernels
/**
 * Fills a floating-point array with a constant value.
 *
 * @param vec   The array to fill.
 * @param size  The number of elements in the array.
 * @param value The value to store in each element.
*/
template <typename Dtype>
__global__ void FillValues(Dtype *vec, int size, Dtype value)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
vec[idx] = value;
}
///////////////////////////////////////////////////////////////////////////////////////////
// GPU Kernels
/**
 * Applies a dropout mask to an array: out[i] = in[i] * mask[i] * scale.
 *
 * @param n     The number of elements in the arrays.
 * @param in    The input array.
 * @param mask  The dropout mask.
 * @param scale The scale applied to the surviving elements.
*/
template <typename Dtype>
__global__ void Dropout(const int n, const Dtype* in, const Dtype* mask,
		const unsigned int threshold, const float scale, Dtype *out)
{
CUDA_KERNEL_LOOP(index, n) {
//out[index] = in[index] * (mask[index] > threshold) * scale;
out[index] = in[index] * (mask[index]) * scale;
}
}
/**
 * Adds the src array to the dst array element-wise.
 *
 * @param dst dst array; the result dst + src is stored here
* @param src src array
* @param N The number of elements in the array.
*/
template <typename Dtype>
__global__ void AddData(Dtype* dst, const Dtype* src, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
dst[idx] = dst[idx] + src[idx];
}
template <typename Dtype>
FullyConnectedLayer<Dtype>::~FullyConnectedLayer() {
if (SLPROP(FullyConnected, receive)) {
Donator<Dtype>::releaseReceiver(SLPROP(FullyConnected, donatorID));
} else {
Util::clearVector(this->_params);
Util::clearVector(this->_paramsHistory);
Util::clearVector(this->_paramsHistory2);
}
checkCudaErrors(hipFree(this->d_onevec));
this->updateParams.clear();
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::reshape() {
if (!Layer<Dtype>::_adjustInputShape()) {
const uint32_t count = Util::vecCountByAxis(this->_inputShape[0], 1);
const uint32_t inputDataCount = this->_inputData[0]->getCountByAxis(1);
SASSERT0(count == inputDataCount);
}
/*
	// Allow the case where the batch count changes.
const uint32_t count = Util::vecCountByAxis(this->_inputShape[0], 1);
const uint32_t inputDataCount = this->_inputData[0]->getCountByAxis(1);
if (inputDataCount == count)
return;
*/
	// XXX: caution
	// Here it is assumed that only the batch count can change,
	// so only a change in the batch count is checked.
if (!Layer<Dtype>::_isInputShapeChanged(0))
return;
this->batches = this->_inputData[0]->getShape(0);
this->in_rows = this->_inputData[0]->getCountByAxis(SLPROP(FullyConnected, axis));
this->out_rows = SLPROP(FullyConnected, nOut);
const uint32_t channels = 1;
const uint32_t cols = 1;
//this->_inputShape[0] = {batches, channels, in_rows, cols};
this->_inputShape[0] = this->_inputData[0]->getShape();
this->_outputData[0]->reshape({this->batches, channels, this->out_rows, cols});
/*
checkCUDNN(cudnnSetTensor4dDescriptor(
this->inputTensorDesc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
this->batches, channels, this->in_rows, cols));
checkCUDNN(cudnnSetTensor4dDescriptor(
this->outputTensorDesc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
this->batches, channels, this->out_rows, cols));
*/
STDOUT_COND_LOG(FULLYCONNECTEDLAYER_LOG,
"<%s> layer' input-0 has reshaped as: %dx%dx%dx%d\n",
SLPROP_BASE(name).c_str(), this->batches, channels, this->in_rows, cols);
STDOUT_COND_LOG(FULLYCONNECTEDLAYER_LOG,
"<%s> layer' output-0 has reshaped as: %dx%dx%dx%d\n",
SLPROP_BASE(name).c_str(), this->batches, channels, this->out_rows, cols);
const uint32_t u_in = in_rows;
const uint32_t u_out = out_rows;
const uint32_t b_in = batches * in_rows;
const uint32_t b_out = batches * out_rows;
STDOUT_COND_LOG(FULLYCONNECTEDLAYER_LOG,
"<%s> layer reshape info (u_in, u_out, b_in, b_out) : %dx%dx%dx%d\n",
SLPROP_BASE(name).c_str(), u_in, u_out, b_in, b_out);
this->_params[ParamType::Weight]->reshape({1, 1, u_out, u_in});
this->_params[ParamType::Bias]->reshape({1, u_out, 1, 1});
this->_paramsHistory[ParamType::Weight]->reshape({1, 1, u_out, u_in});
this->_paramsHistory[ParamType::Bias]->reshape({1, u_out, 1, 1});;
this->_paramsHistory2[ParamType::Weight]->reshape({1, 1, u_out, u_in});
this->_paramsHistory2[ParamType::Bias]->reshape({1, u_out, 1, 1});
if (!this->_paramsInitialized[Weight]) {
SLPROP(FullyConnected, weightFiller).fill(this->_params[ParamType::Weight]);
this->_paramsInitialized[Weight] = true;
}
if (!this->_paramsInitialized[Bias]) {
SLPROP(FullyConnected, weightFiller).fill(this->_params[ParamType::Bias]);
this->_paramsInitialized[Bias] = true;
}
if (this->updateParams.size() == 0) {
UpdateParam upWeight;
upWeight.paramType = Weight;
upWeight.paramDataPtr = (void*)this->_params[Weight];
upWeight.paramHis1Ptr = (void*)this->_paramsHistory[Weight];
upWeight.paramHis2Ptr = (void*)this->_paramsHistory2[Weight];
this->updateParams.push_back(upWeight);
UpdateParam upBias;
upBias.paramType = Bias;
upBias.paramDataPtr = (void*)this->_params[Bias];
upBias.paramHis1Ptr = (void*)this->_paramsHistory[Bias];
upBias.paramHis2Ptr = (void*)this->_paramsHistory2[Bias];
this->updateParams.push_back(upBias);
}
checkCudaErrors(Util::ucudaMalloc(&this->d_onevec, sizeof(Dtype)*batches));
hipLaunchKernelGGL(( FillValues), dim3(SOOOA_GET_BLOCKS(batches)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
this->d_onevec, batches, 1.0f);
this->_mask.reshape(b_out);
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::update() {
//const uint32_t in_rows = this->_inputShape[0][2];
//const uint32_t out_rows = this->_outputData[0]->getShape(2);
const uint32_t weightSize = this->in_rows * this->out_rows;
const Dtype regScale =
SNPROP(weightDecay) * SLPROP(FullyConnected, weightUpdateParam).decay_mult;
const Dtype learnScale = Update<Dtype>::calcLearningRate() *
SLPROP(FullyConnected, weightUpdateParam).lr_mult;
const Dtype beta1 = SNPROP(beta1);
const Dtype beta2 = SNPROP(beta2);
SLPROP(FullyConnected, decayedBeta1) *= beta1;
SLPROP(FullyConnected, decayedBeta2) *= beta2;
UpdateContext contextWeight =
Update<Dtype>::makeContext(weightSize, regScale, learnScale);
const uint32_t biasSize = out_rows;
const Dtype regScale_b =
SNPROP(weightDecay) * SLPROP(FullyConnected, biasUpdateParam).decay_mult;
const Dtype learnScale_b = Update<Dtype>::calcLearningRate() *
SLPROP(FullyConnected, biasUpdateParam).lr_mult;
UpdateContext contextBias =
Update<Dtype>::makeContext(biasSize, regScale_b, learnScale_b);
SASSUME0(this->updateParams.size() == 2);
this->updateParams[Weight].context = contextWeight;
this->updateParams[Bias].context = contextBias;
Updater::updateParams(this->updateParams);
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::applyChanges(LearnableLayer<Dtype> *targetLayer) {
//const uint32_t in_rows = this->_inputShape[0][2];
//const uint32_t out_rows = this->_outputData[0]->getShape(2);
const uint32_t weightSize = this->in_rows * this->out_rows;
const uint32_t biasSize = this->out_rows;
FullyConnectedLayer<Dtype>* _targetLayer = (FullyConnectedLayer<Dtype>*)targetLayer;
//int blockSize = BW;
int blockSize = SOOOA_CUDA_NUM_THREADS;
int gridSize;
gridSize = (weightSize + blockSize -1) / blockSize;
hipLaunchKernelGGL(( AddData), dim3(gridSize), dim3(blockSize), 0, 0,
_targetLayer->_params[Weight]->mutable_device_grad(),
this->_params[Weight]->device_grad(), weightSize);
gridSize = (biasSize + blockSize -1) / blockSize;
hipLaunchKernelGGL(( AddData), dim3(gridSize), dim3(blockSize), 0, 0,
_targetLayer->_params[Bias]->mutable_device_grad(),
this->_params[Bias]->device_grad(), biasSize);
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::syncParams(LearnableLayer<Dtype> *targetLayer) {
//const uint32_t in_rows = this->_inputShape[0][2];
//const uint32_t out_rows = this->_outputData[0]->getShape(2);
const uint32_t weightSize = this->in_rows * this->out_rows;
const uint32_t biasSize = this->out_rows;
FullyConnectedLayer<Dtype>* _targetLayer = (FullyConnectedLayer<Dtype>*)targetLayer;
memcpy(this->_params[Weight]->mutable_host_grad(), _targetLayer->_params[Weight]->host_grad(),
weightSize);
memcpy(this->_params[Bias]->mutable_host_grad(), _targetLayer->_params[Bias]->host_grad(),
biasSize);
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::saveParams(ofstream& ofs) {
LearnableLayer<Dtype>::saveParams(ofs);
/*
if (this->_inputData.size() == 1) {
cout << SLPROP_BASE(name) << " saves as usual ... " << endl;
LearnableLayer<Dtype>::saveParams(ofs);
} else {
cout << SLPROP_BASE(name) << " saves as special ... " << endl;
uint32_t numParams = this->_params.size();
vector<vector<float>> bboxMeans;
vector<vector<float>> bboxStds;
fill2dVecWithData(this->_inputData[1], bboxMeans);
fill2dVecWithData(this->_inputData[2], bboxStds);
#if 0
this->_inputData[1]->print_shape();
this->_inputData[2]->print_shape();
this->_params[0]->print_shape();
this->_params[1]->print_shape();
exit(1);
#endif
Data<Dtype>* param0 = this->_params[0];
Data<Dtype> orig0(param0->_name, true);
orig0.reshapeLike(param0);
const Dtype* srcPtr0 = param0->host_data();
Dtype* dstPtr0 = orig0.mutable_host_data();
const int numRows0 = param0->getShape(2);
const int numCols0 = param0->getShape(3);
int index;
int id1, id2;
for (int row = 0; row < numRows0; row++) {
id2 = row / 4;
id1 = row % 4;
for (int col = 0; col < numCols0; col++) {
index = row * numCols0 + col;
dstPtr0[index] = srcPtr0[index] * bboxStds[id2][id1];
}
}
Data<Dtype>* param1 = this->_params[1];
Data<Dtype> orig1(param1->_name, true);
orig1.reshapeLike(param1);
const Dtype* srcPtr1 = param1->host_data();
Dtype* dstPtr1 = orig1.mutable_host_data();
const int numRows1 = param1->getShape(1);
for (int row = 0; row < numRows1; row++) {
id2 = row / 4;
id1 = row % 4;
dstPtr1[row] = srcPtr1[row] * bboxStds[id2][id1] + bboxMeans[id2][id1];
}
orig0.save(ofs);
orig1.save(ofs);
}
*/
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::feedforward() {
reshape();
_computeWeightedData();
_computeWeightBiasedData();
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::_computeWeightedData() {
//const uint32_t batches = this->_inputShape[0][0];
//const uint32_t in_rows = this->_inputShape[0][2];
//const uint32_t out_rows = this->_outputData[0]->getShape(2);
// Apply weight to input data
const Dtype* d_weightData = this->_params[Weight]->device_data();
const Dtype* d_inputData = this->_inputData[0]->device_data();
//Dtype* d_preActivationData = _preActivation->mutable_device_data();
Dtype* d_outputData = this->_outputData[0]->mutable_device_data();
/**
	 * [hipblasSgemm() function description (from cuBlas User Documentation)]
*
* hipblasStatus_t hipblasSgemm(hipblasHandle_t handle, hipblasOperation_t transa,
* hipblasOperation_t transb, int m, int n, int k,
* const float *alpha, const float *A, int * lda,
* const float *B, int ldb, const float *beta, float *C,
* int ldc)
*
	 * C = alpha * op(A) * op(B) + beta * C
	 *
	 * where alpha and beta are scalars, and A, B and C are matrices stored in column-major
	 * format with dimensions op(A) m x k, op(B) k x n and C m x n, respectively. Also, for
	 * matrix A
	 *
	 * op(A) = A if transa == HIPBLAS_OP_N, A^T if transa == HIPBLAS_OP_T, A^H if transa ==
	 * HIPBLAS_OP_C
*
* and op ( B ) is defined similarly for matrix B .
*
* hipblasOperation_t option
* (1) HIPBLAS_OP_N => the non-transpose operation is selected.
* (2) HIPBLAS_OP_T => the transpose operation is selected.
* (3) HIPBLAS_OP_C => the conjugate transpose operation is selected.
*
* lda,ldb,ldc => leading dimension of two-dimensional array used to store the matrix A,
* B, C
*/
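	/*
	 * A minimal sketch of the intended mapping (an assumption: soooa_gpu_gemm is taken to
	 * follow the Caffe-style row-major convention gemm(transA, transB, M, N, K, alpha, A, B,
	 * beta, C), i.e. C[MxN] = alpha * op(A)[MxK] * op(B)[KxN] + beta * C[MxN]):
	 *
	 *   output[batches x out_rows] = input[batches x in_rows] * weight[out_rows x in_rows]^T
	 *
	 * which is what the CblasNoTrans/CblasTrans call below requests in the batched case.
	 */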
if (this->batches == 1) {
soooa_gpu_gemv(CblasNoTrans,
this->out_rows, this->in_rows,
Cuda::alpha, d_weightData, d_inputData,
Cuda::beta, d_outputData);
} else {
soooa_gpu_gemm(CblasNoTrans, CblasTrans,
this->batches, this->out_rows, this->in_rows,
Cuda::alpha, d_inputData, d_weightData,
Cuda::beta, d_outputData);
}
/*
checkCudaErrors(hipblasSgemm(Cuda::cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N,
this->out_rows, this->batches, this->in_rows,
&Cuda::alpha, d_weightData, this->out_rows, d_inputData, this->in_rows,
&Cuda::beta, d_outputData, this->out_rows));
*/
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::_computeWeightBiasedData() {
// Add bias to weighted input data
const Dtype* d_biasData = this->_params[Bias]->device_data();
//Dtype* d_preActivationData = _preActivation->mutable_device_data();
Dtype* d_outputData = this->_outputData[0]->mutable_device_data();
this->_params[Bias]->print_data();
if (this->batches == 1) {
soooa_gpu_axpy(this->out_rows, 1.0f, d_biasData, d_outputData);
} else {
soooa_gpu_gemm(CblasNoTrans, CblasNoTrans,
this->batches, this->out_rows, 1,
Cuda::alpha, this->d_onevec, d_biasData,
Cuda::alpha, d_outputData);
}
/*
checkCudaErrors(hipblasSgemm(Cuda::cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N,
this->out_rows, this->batches, 1,
&Cuda::alpha,
d_biasData, this->out_rows,
this->d_onevec, 1,
&Cuda::alpha,
d_outputData, this->out_rows));
*/
this->_params[Bias]->print_data();
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::backpropagation() {
/*
	 * Assume a simple network layer like the one below.
	 *
	 *            <<<< ith layer >>>>                  <<<< i+1th layer >>>>
	 *   .....    Xi  Wi  Ai  Fi  Yi (=Xi+1)   ........
	 *                Bi
	 *   .....    O --------- O ------------ O  ........
	 *                                 dL/dYi is already computed
	 *
	 * ( Xi = input of the ith layer,   Wi = weight of the ith layer,
	 *   Bi = bias of the ith layer,    Ai = intermediate (pre-activation) value of the ith layer,
	 *   Fi = activation function of the ith layer,
	 *   Yi = output of the ith layer, which is also the input of the (i+1)th layer,
	 *   L  = loss, dL/dYi = gradient already computed by the (i+1)th layer )
	 *
	 * Training with gradient descent requires dL/dWi and dL/dBi.
	 * By the chain rule they are expressed as:
	 * (1) dL/dWi = dL/dYi * dYi/dAi * dAi/dWi
	 * (2) dL/dBi = dL/dYi * dYi/dAi * dAi/dBi
	 *
	 * Computing (1) and (2) requires the following four pieces:
	 *
	 * (A) dL/dYi : stored in the grad of _outputData[0] during the backward pass of the
	 *              (i+1)th layer.
	 *
	 * (B) dYi/dAi : _computePreActivationGrad() computes dL/dYi * dYi/dAi.
	 *               dL/dYi is already available, so only Yi and Ai are needed; during the
	 *               forward pass they were stored in the data of _outputData[0] and of
	 *               _preActivation, respectively. Feeding Yi, Ai and dL/dYi into the
	 *               activation function yields dL/dYi * dYi/dAi, which is stored in the
	 *               grad of this->_preActivation.
	 *
	 * (C) dAi/dWi : _computeWeightGrad() combines the results of (A) and (B) to compute the
	 *               weight grad. Since dAi/dWi is simply transpose(Xi), only a GEMM is
	 *               needed. The result is stored in the grad of _params[Weight].
	 *
	 * (D) dAi/dBi : same as (C), except that _computeBiasGrad() computes the bias grad and
	 *               stores the result in the grad of _params[Bias].
	 *
	 * Finally, dL/dYi-1 must be passed to the (i-1)th layer. This is done in
	 * _computeInputGrad(), which stores the result in the grad of _inputData.
	 * dL/dYi-1 = dL/dXi = dL/dAi * dAi/dXi; dL/dAi is stored in the grad of _preActivation,
	 * and dAi/dXi is the transpose of Wi, so it can be computed.
*/
_computeWeightGrad();
_computeBiasGrad();
_computeInputGrad();
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::_computeWeightGrad() {
// d(Cost)/d(Weight)
const Dtype* d_outputGrad = this->_outputData[0]->device_grad();
const Dtype* d_inputData = this->_inputData[0]->device_data();
Dtype* d_weightGrad = this->_params[Weight]->mutable_device_grad();
soooa_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
this->out_rows, this->in_rows, this->batches,
Cuda::alpha, d_outputGrad, d_inputData,
Cuda::alpha, d_weightGrad);
/*
checkCudaErrors(hipblasSgemm(Cuda::cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T,
this->out_rows, this->in_rows, this->batches,
&Cuda::alpha, d_outputGrad, this->out_rows, d_inputData, this->in_rows,
&Cuda::beta, d_weightGrad, this->out_rows));
*/
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::_computeBiasGrad() {
//const uint32_t batches = this->_inputShape[0][0];
//const uint32_t in_rows = this->_inputShape[0][2];
//const uint32_t out_rows = this->_outputData[0]->getShape(2);
// d(Cost)/d(Bias) (same as d_preActivationGrad)
//const Dtype* d_preActivationGrad = this->_preActivation->device_grad();
const Dtype* d_outputGrad = this->_outputData[0]->device_grad();
Dtype* d_biasGrad = this->_params[Bias]->mutable_device_grad();
soooa_gpu_gemv<Dtype>(CblasTrans,
this->batches, this->out_rows,
Cuda::alpha, d_outputGrad, this->d_onevec,
Cuda::alpha, d_biasGrad);
/*
checkCudaErrors(hipblasSgemv(Cuda::cublasHandle, HIPBLAS_OP_N,
this->out_rows, this->batches,
&Cuda::alpha, d_outputGrad, this->out_rows, this->d_onevec, 1,
&Cuda::beta, d_biasGrad, 1));
*/
this->_params[Bias]->print_grad("biasGrad:");
this->_params[Weight]->print_data("weightData:");
//_preActivation->print_grad("preActivationGrad");
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::_computeInputGrad() {
//const uint32_t batches = this->_inputShape[0][0];
//const uint32_t in_rows = this->_inputShape[0][2];
//const uint32_t out_rows = this->_outputData[0]->getShape(2);
// d(Cost)/d(Input)
const Dtype* d_weightData = this->_params[Weight]->device_data();
//const Dtype* d_preActivationGrad = this->_preActivation->device_grad();
const Dtype* d_outputGrad = this->_outputData[0]->device_grad();
Dtype* d_inputGrad = this->_inputData[0]->mutable_device_grad();
soooa_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
this->batches, this->in_rows, this->out_rows,
Cuda::alpha, d_outputGrad, d_weightData,
Cuda::beta, d_inputGrad);
/*
checkCudaErrors(hipblasSgemm(Cuda::cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N,
this->in_rows, this->batches, this->out_rows,
&Cuda::alpha, d_weightData, this->out_rows, d_outputGrad, this->out_rows,
&Cuda::beta, d_inputGrad, this->in_rows));
*/
this->_inputData[0]->print_grad("inputGrad:");
/*
if(this->_input->is_nan_grad()) {
cout << SLPROP_BASE(name) << " _input gradient nan ... " << endl;
Data<Dtype>::printConfig = 1;
this->_input->print_grad("deltaInput:");
Data<Dtype>::printConfig = 0;
exit(1);
}
*/
}
template FullyConnectedLayer<float>::~FullyConnectedLayer();
template void FullyConnectedLayer<float>::reshape();
template void FullyConnectedLayer<float>::update();
template void FullyConnectedLayer<float>::feedforward();
template void FullyConnectedLayer<float>::backpropagation();
/*
template void* FullyConnectedLayer<float>::initLayer();
template void FullyConnectedLayer<float>::destroyLayer(void* instancePtr);
template void FullyConnectedLayer<float>::setInOutTensor(void* instancePtr, void* tensorPtr,
bool isInput, int index);
template bool FullyConnectedLayer<float>::allocLayerTensors(void* instancePtr);
template void FullyConnectedLayer<float>::forwardTensor(void* instancePtr, int miniBatchIdx);
template void FullyConnectedLayer<float>::backwardTensor(void* instancePtr);
template void FullyConnectedLayer<float>::learnTensor(void* instancePtr);
*/
|
0a21ea58e6b95d3b6c834a6bae007a4aea565159.cu
|
/*
* FullyConnectedLayer.cpp
*
* Created on: 2016. 5. 10.
* Author: jhkim
*/
#include "cuda_runtime.h"
#include <algorithm>
#include "FullyConnectedLayer.h"
#include "MathFunctions.h"
#include "Util.h"
#include "Network.h"
#include "SysLog.h"
#include "StdOutLog.h"
#include "PropMgmt.h"
#include "Update.h"
#include "Updater.h"
#include "Donator.h"
#include "frcnn_common.h"
#define FULLYCONNECTEDLAYER_LOG 0
using namespace std;
///////////////////////////////////////////////////////////////////////////////////////////
// GPU Kernels
/**
 * Fills a floating-point array with a constant value.
 *
 * @param vec   The array to fill.
 * @param size  The number of elements in the array.
 * @param value The value to store in each element.
*/
template <typename Dtype>
__global__ void FillValues(Dtype *vec, int size, Dtype value)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
vec[idx] = value;
}
///////////////////////////////////////////////////////////////////////////////////////////
// GPU Kernels
/**
 * Applies a dropout mask to an array: out[i] = in[i] * mask[i] * scale.
 *
 * @param n     The number of elements in the arrays.
 * @param in    The input array.
 * @param mask  The dropout mask.
 * @param scale The scale applied to the surviving elements.
*/
template <typename Dtype>
__global__ void Dropout(const int n, const Dtype* in, const Dtype* mask,
		const unsigned int threshold, const float scale, Dtype *out)
{
CUDA_KERNEL_LOOP(index, n) {
//out[index] = in[index] * (mask[index] > threshold) * scale;
out[index] = in[index] * (mask[index]) * scale;
}
}
/**
 * Adds the src array to the dst array element-wise.
 *
 * @param dst dst array; the result dst + src is stored here
* @param src src array
* @param N The number of elements in the array.
*/
template <typename Dtype>
__global__ void AddData(Dtype* dst, const Dtype* src, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
dst[idx] = dst[idx] + src[idx];
}
template <typename Dtype>
FullyConnectedLayer<Dtype>::~FullyConnectedLayer() {
if (SLPROP(FullyConnected, receive)) {
Donator<Dtype>::releaseReceiver(SLPROP(FullyConnected, donatorID));
} else {
Util::clearVector(this->_params);
Util::clearVector(this->_paramsHistory);
Util::clearVector(this->_paramsHistory2);
}
checkCudaErrors(cudaFree(this->d_onevec));
this->updateParams.clear();
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::reshape() {
if (!Layer<Dtype>::_adjustInputShape()) {
const uint32_t count = Util::vecCountByAxis(this->_inputShape[0], 1);
const uint32_t inputDataCount = this->_inputData[0]->getCountByAxis(1);
SASSERT0(count == inputDataCount);
}
/*
	// Allow the case where the batch count changes.
const uint32_t count = Util::vecCountByAxis(this->_inputShape[0], 1);
const uint32_t inputDataCount = this->_inputData[0]->getCountByAxis(1);
if (inputDataCount == count)
return;
*/
	// XXX: caution
	// Here it is assumed that only the batch count can change,
	// so only a change in the batch count is checked.
if (!Layer<Dtype>::_isInputShapeChanged(0))
return;
this->batches = this->_inputData[0]->getShape(0);
this->in_rows = this->_inputData[0]->getCountByAxis(SLPROP(FullyConnected, axis));
this->out_rows = SLPROP(FullyConnected, nOut);
const uint32_t channels = 1;
const uint32_t cols = 1;
//this->_inputShape[0] = {batches, channels, in_rows, cols};
this->_inputShape[0] = this->_inputData[0]->getShape();
this->_outputData[0]->reshape({this->batches, channels, this->out_rows, cols});
/*
checkCUDNN(cudnnSetTensor4dDescriptor(
this->inputTensorDesc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
this->batches, channels, this->in_rows, cols));
checkCUDNN(cudnnSetTensor4dDescriptor(
this->outputTensorDesc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
this->batches, channels, this->out_rows, cols));
*/
STDOUT_COND_LOG(FULLYCONNECTEDLAYER_LOG,
"<%s> layer' input-0 has reshaped as: %dx%dx%dx%d\n",
SLPROP_BASE(name).c_str(), this->batches, channels, this->in_rows, cols);
STDOUT_COND_LOG(FULLYCONNECTEDLAYER_LOG,
"<%s> layer' output-0 has reshaped as: %dx%dx%dx%d\n",
SLPROP_BASE(name).c_str(), this->batches, channels, this->out_rows, cols);
const uint32_t u_in = in_rows;
const uint32_t u_out = out_rows;
const uint32_t b_in = batches * in_rows;
const uint32_t b_out = batches * out_rows;
STDOUT_COND_LOG(FULLYCONNECTEDLAYER_LOG,
"<%s> layer reshape info (u_in, u_out, b_in, b_out) : %dx%dx%dx%d\n",
SLPROP_BASE(name).c_str(), u_in, u_out, b_in, b_out);
this->_params[ParamType::Weight]->reshape({1, 1, u_out, u_in});
this->_params[ParamType::Bias]->reshape({1, u_out, 1, 1});
this->_paramsHistory[ParamType::Weight]->reshape({1, 1, u_out, u_in});
this->_paramsHistory[ParamType::Bias]->reshape({1, u_out, 1, 1});;
this->_paramsHistory2[ParamType::Weight]->reshape({1, 1, u_out, u_in});
this->_paramsHistory2[ParamType::Bias]->reshape({1, u_out, 1, 1});
if (!this->_paramsInitialized[Weight]) {
SLPROP(FullyConnected, weightFiller).fill(this->_params[ParamType::Weight]);
this->_paramsInitialized[Weight] = true;
}
if (!this->_paramsInitialized[Bias]) {
SLPROP(FullyConnected, weightFiller).fill(this->_params[ParamType::Bias]);
this->_paramsInitialized[Bias] = true;
}
if (this->updateParams.size() == 0) {
UpdateParam upWeight;
upWeight.paramType = Weight;
upWeight.paramDataPtr = (void*)this->_params[Weight];
upWeight.paramHis1Ptr = (void*)this->_paramsHistory[Weight];
upWeight.paramHis2Ptr = (void*)this->_paramsHistory2[Weight];
this->updateParams.push_back(upWeight);
UpdateParam upBias;
upBias.paramType = Bias;
upBias.paramDataPtr = (void*)this->_params[Bias];
upBias.paramHis1Ptr = (void*)this->_paramsHistory[Bias];
upBias.paramHis2Ptr = (void*)this->_paramsHistory2[Bias];
this->updateParams.push_back(upBias);
}
checkCudaErrors(Util::ucudaMalloc(&this->d_onevec, sizeof(Dtype)*batches));
FillValues<<<SOOOA_GET_BLOCKS(batches), SOOOA_CUDA_NUM_THREADS>>>(
this->d_onevec, batches, 1.0f);
this->_mask.reshape(b_out);
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::update() {
//const uint32_t in_rows = this->_inputShape[0][2];
//const uint32_t out_rows = this->_outputData[0]->getShape(2);
const uint32_t weightSize = this->in_rows * this->out_rows;
const Dtype regScale =
SNPROP(weightDecay) * SLPROP(FullyConnected, weightUpdateParam).decay_mult;
const Dtype learnScale = Update<Dtype>::calcLearningRate() *
SLPROP(FullyConnected, weightUpdateParam).lr_mult;
const Dtype beta1 = SNPROP(beta1);
const Dtype beta2 = SNPROP(beta2);
SLPROP(FullyConnected, decayedBeta1) *= beta1;
SLPROP(FullyConnected, decayedBeta2) *= beta2;
UpdateContext contextWeight =
Update<Dtype>::makeContext(weightSize, regScale, learnScale);
const uint32_t biasSize = out_rows;
const Dtype regScale_b =
SNPROP(weightDecay) * SLPROP(FullyConnected, biasUpdateParam).decay_mult;
const Dtype learnScale_b = Update<Dtype>::calcLearningRate() *
SLPROP(FullyConnected, biasUpdateParam).lr_mult;
UpdateContext contextBias =
Update<Dtype>::makeContext(biasSize, regScale_b, learnScale_b);
SASSUME0(this->updateParams.size() == 2);
this->updateParams[Weight].context = contextWeight;
this->updateParams[Bias].context = contextBias;
Updater::updateParams(this->updateParams);
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::applyChanges(LearnableLayer<Dtype> *targetLayer) {
//const uint32_t in_rows = this->_inputShape[0][2];
//const uint32_t out_rows = this->_outputData[0]->getShape(2);
const uint32_t weightSize = this->in_rows * this->out_rows;
const uint32_t biasSize = this->out_rows;
FullyConnectedLayer<Dtype>* _targetLayer = (FullyConnectedLayer<Dtype>*)targetLayer;
//int blockSize = BW;
int blockSize = SOOOA_CUDA_NUM_THREADS;
int gridSize;
gridSize = (weightSize + blockSize -1) / blockSize;
AddData<<<gridSize, blockSize>>>(
_targetLayer->_params[Weight]->mutable_device_grad(),
this->_params[Weight]->device_grad(), weightSize);
gridSize = (biasSize + blockSize -1) / blockSize;
AddData<<<gridSize, blockSize>>>(
_targetLayer->_params[Bias]->mutable_device_grad(),
this->_params[Bias]->device_grad(), biasSize);
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::syncParams(LearnableLayer<Dtype> *targetLayer) {
//const uint32_t in_rows = this->_inputShape[0][2];
//const uint32_t out_rows = this->_outputData[0]->getShape(2);
const uint32_t weightSize = this->in_rows * this->out_rows;
const uint32_t biasSize = this->out_rows;
FullyConnectedLayer<Dtype>* _targetLayer = (FullyConnectedLayer<Dtype>*)targetLayer;
memcpy(this->_params[Weight]->mutable_host_grad(), _targetLayer->_params[Weight]->host_grad(),
weightSize);
memcpy(this->_params[Bias]->mutable_host_grad(), _targetLayer->_params[Bias]->host_grad(),
biasSize);
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::saveParams(ofstream& ofs) {
LearnableLayer<Dtype>::saveParams(ofs);
/*
if (this->_inputData.size() == 1) {
cout << SLPROP_BASE(name) << " saves as usual ... " << endl;
LearnableLayer<Dtype>::saveParams(ofs);
} else {
cout << SLPROP_BASE(name) << " saves as special ... " << endl;
uint32_t numParams = this->_params.size();
vector<vector<float>> bboxMeans;
vector<vector<float>> bboxStds;
fill2dVecWithData(this->_inputData[1], bboxMeans);
fill2dVecWithData(this->_inputData[2], bboxStds);
#if 0
this->_inputData[1]->print_shape();
this->_inputData[2]->print_shape();
this->_params[0]->print_shape();
this->_params[1]->print_shape();
exit(1);
#endif
Data<Dtype>* param0 = this->_params[0];
Data<Dtype> orig0(param0->_name, true);
orig0.reshapeLike(param0);
const Dtype* srcPtr0 = param0->host_data();
Dtype* dstPtr0 = orig0.mutable_host_data();
const int numRows0 = param0->getShape(2);
const int numCols0 = param0->getShape(3);
int index;
int id1, id2;
for (int row = 0; row < numRows0; row++) {
id2 = row / 4;
id1 = row % 4;
for (int col = 0; col < numCols0; col++) {
index = row * numCols0 + col;
dstPtr0[index] = srcPtr0[index] * bboxStds[id2][id1];
}
}
Data<Dtype>* param1 = this->_params[1];
Data<Dtype> orig1(param1->_name, true);
orig1.reshapeLike(param1);
const Dtype* srcPtr1 = param1->host_data();
Dtype* dstPtr1 = orig1.mutable_host_data();
const int numRows1 = param1->getShape(1);
for (int row = 0; row < numRows1; row++) {
id2 = row / 4;
id1 = row % 4;
dstPtr1[row] = srcPtr1[row] * bboxStds[id2][id1] + bboxMeans[id2][id1];
}
orig0.save(ofs);
orig1.save(ofs);
}
*/
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::feedforward() {
reshape();
_computeWeightedData();
_computeWeightBiasedData();
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::_computeWeightedData() {
//const uint32_t batches = this->_inputShape[0][0];
//const uint32_t in_rows = this->_inputShape[0][2];
//const uint32_t out_rows = this->_outputData[0]->getShape(2);
// Apply weight to input data
const Dtype* d_weightData = this->_params[Weight]->device_data();
const Dtype* d_inputData = this->_inputData[0]->device_data();
//Dtype* d_preActivationData = _preActivation->mutable_device_data();
Dtype* d_outputData = this->_outputData[0]->mutable_device_data();
/**
	 * [cublasSgemm() function description (from cuBlas User Documentation)]
*
* cublasStatus_t cublasSgemm(cublasHandle_t handle, cublasOperation_t transa,
* cublasOperation_t transb, int m, int n, int k,
* const float *alpha, const float *A, int * lda,
* const float *B, int ldb, const float *beta, float *C,
* int ldc)
*
* C = α op ( A ) op ( B ) + β C
*
* where α and β are scalars, and A , B and C are matrices stored in column-major format
* with dimensions op ( A ) m × k , op ( B ) k × n and C m × n , respectively. Also, for
* matrix A
*
* op ( A ) = A if transa == CUBLAS_OP_N A T if transa == CUBLAS_OP_T A H if transa ==
* CUBLAS_OP_C
*
* and op ( B ) is defined similarly for matrix B .
*
* cublasOperation_t option
* (1) CUBLAS_OP_N => the non-transpose operation is selected.
* (2) CUBLAS_OP_T => the transpose operation is selected.
* (3) CUBLAS_OP_C => the conjugate transpose operation is selected.
*
* lda,ldb,ldc => leading dimension of two-dimensional array used to store the matrix A,
* B, C
*/
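	/*
	 * A minimal sketch of the intended mapping (an assumption: soooa_gpu_gemm is taken to
	 * follow the Caffe-style row-major convention gemm(transA, transB, M, N, K, alpha, A, B,
	 * beta, C), i.e. C[MxN] = alpha * op(A)[MxK] * op(B)[KxN] + beta * C[MxN]):
	 *
	 *   output[batches x out_rows] = input[batches x in_rows] * weight[out_rows x in_rows]^T
	 *
	 * which is what the CblasNoTrans/CblasTrans call below requests in the batched case.
	 */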
if (this->batches == 1) {
soooa_gpu_gemv(CblasNoTrans,
this->out_rows, this->in_rows,
Cuda::alpha, d_weightData, d_inputData,
Cuda::beta, d_outputData);
} else {
soooa_gpu_gemm(CblasNoTrans, CblasTrans,
this->batches, this->out_rows, this->in_rows,
Cuda::alpha, d_inputData, d_weightData,
Cuda::beta, d_outputData);
}
/*
checkCudaErrors(cublasSgemm(Cuda::cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N,
this->out_rows, this->batches, this->in_rows,
&Cuda::alpha, d_weightData, this->out_rows, d_inputData, this->in_rows,
&Cuda::beta, d_outputData, this->out_rows));
*/
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::_computeWeightBiasedData() {
// Add bias to weighted input data
const Dtype* d_biasData = this->_params[Bias]->device_data();
//Dtype* d_preActivationData = _preActivation->mutable_device_data();
Dtype* d_outputData = this->_outputData[0]->mutable_device_data();
this->_params[Bias]->print_data();
if (this->batches == 1) {
soooa_gpu_axpy(this->out_rows, 1.0f, d_biasData, d_outputData);
} else {
soooa_gpu_gemm(CblasNoTrans, CblasNoTrans,
this->batches, this->out_rows, 1,
Cuda::alpha, this->d_onevec, d_biasData,
Cuda::alpha, d_outputData);
}
/*
checkCudaErrors(cublasSgemm(Cuda::cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N,
this->out_rows, this->batches, 1,
&Cuda::alpha,
d_biasData, this->out_rows,
this->d_onevec, 1,
&Cuda::alpha,
d_outputData, this->out_rows));
*/
this->_params[Bias]->print_data();
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::backpropagation() {
/*
	 * Assume a simple network layer like the one below.
	 *
	 *            <<<< ith layer >>>>                  <<<< i+1th layer >>>>
	 *   .....    Xi  Wi  Ai  Fi  Yi (=Xi+1)   ........
	 *                Bi
	 *   .....    O --------- O ------------ O  ........
	 *                                 dL/dYi is already computed
	 *
	 * ( Xi = input of the ith layer,   Wi = weight of the ith layer,
	 *   Bi = bias of the ith layer,    Ai = intermediate (pre-activation) value of the ith layer,
	 *   Fi = activation function of the ith layer,
	 *   Yi = output of the ith layer, which is also the input of the (i+1)th layer,
	 *   L  = loss, dL/dYi = gradient already computed by the (i+1)th layer )
	 *
	 * Training with gradient descent requires dL/dWi and dL/dBi.
	 * By the chain rule they are expressed as:
	 * (1) dL/dWi = dL/dYi * dYi/dAi * dAi/dWi
	 * (2) dL/dBi = dL/dYi * dYi/dAi * dAi/dBi
	 *
	 * Computing (1) and (2) requires the following four pieces:
	 *
	 * (A) dL/dYi : stored in the grad of _outputData[0] during the backward pass of the
	 *              (i+1)th layer.
	 *
	 * (B) dYi/dAi : _computePreActivationGrad() computes dL/dYi * dYi/dAi.
	 *               dL/dYi is already available, so only Yi and Ai are needed; during the
	 *               forward pass they were stored in the data of _outputData[0] and of
	 *               _preActivation, respectively. Feeding Yi, Ai and dL/dYi into the
	 *               activation function yields dL/dYi * dYi/dAi, which is stored in the
	 *               grad of this->_preActivation.
	 *
	 * (C) dAi/dWi : _computeWeightGrad() combines the results of (A) and (B) to compute the
	 *               weight grad. Since dAi/dWi is simply transpose(Xi), only a GEMM is
	 *               needed. The result is stored in the grad of _params[Weight].
	 *
	 * (D) dAi/dBi : same as (C), except that _computeBiasGrad() computes the bias grad and
	 *               stores the result in the grad of _params[Bias].
	 *
	 * Finally, dL/dYi-1 must be passed to the (i-1)th layer. This is done in
	 * _computeInputGrad(), which stores the result in the grad of _inputData.
	 * dL/dYi-1 = dL/dXi = dL/dAi * dAi/dXi; dL/dAi is stored in the grad of _preActivation,
	 * and dAi/dXi is the transpose of Wi, so it can be computed.
*/
_computeWeightGrad();
_computeBiasGrad();
_computeInputGrad();
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::_computeWeightGrad() {
// d(Cost)/d(Weight)
const Dtype* d_outputGrad = this->_outputData[0]->device_grad();
const Dtype* d_inputData = this->_inputData[0]->device_data();
Dtype* d_weightGrad = this->_params[Weight]->mutable_device_grad();
soooa_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
this->out_rows, this->in_rows, this->batches,
Cuda::alpha, d_outputGrad, d_inputData,
Cuda::alpha, d_weightGrad);
/*
checkCudaErrors(cublasSgemm(Cuda::cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T,
this->out_rows, this->in_rows, this->batches,
&Cuda::alpha, d_outputGrad, this->out_rows, d_inputData, this->in_rows,
&Cuda::beta, d_weightGrad, this->out_rows));
*/
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::_computeBiasGrad() {
//const uint32_t batches = this->_inputShape[0][0];
//const uint32_t in_rows = this->_inputShape[0][2];
//const uint32_t out_rows = this->_outputData[0]->getShape(2);
// d(Cost)/d(Bias) (same as d_preActivationGrad)
//const Dtype* d_preActivationGrad = this->_preActivation->device_grad();
const Dtype* d_outputGrad = this->_outputData[0]->device_grad();
Dtype* d_biasGrad = this->_params[Bias]->mutable_device_grad();
soooa_gpu_gemv<Dtype>(CblasTrans,
this->batches, this->out_rows,
Cuda::alpha, d_outputGrad, this->d_onevec,
Cuda::alpha, d_biasGrad);
/*
checkCudaErrors(cublasSgemv(Cuda::cublasHandle, CUBLAS_OP_N,
this->out_rows, this->batches,
&Cuda::alpha, d_outputGrad, this->out_rows, this->d_onevec, 1,
&Cuda::beta, d_biasGrad, 1));
*/
this->_params[Bias]->print_grad("biasGrad:");
this->_params[Weight]->print_data("weightData:");
//_preActivation->print_grad("preActivationGrad");
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::_computeInputGrad() {
//const uint32_t batches = this->_inputShape[0][0];
//const uint32_t in_rows = this->_inputShape[0][2];
//const uint32_t out_rows = this->_outputData[0]->getShape(2);
// d(Cost)/d(Input)
const Dtype* d_weightData = this->_params[Weight]->device_data();
//const Dtype* d_preActivationGrad = this->_preActivation->device_grad();
const Dtype* d_outputGrad = this->_outputData[0]->device_grad();
Dtype* d_inputGrad = this->_inputData[0]->mutable_device_grad();
soooa_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
this->batches, this->in_rows, this->out_rows,
Cuda::alpha, d_outputGrad, d_weightData,
Cuda::beta, d_inputGrad);
/*
checkCudaErrors(cublasSgemm(Cuda::cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N,
this->in_rows, this->batches, this->out_rows,
&Cuda::alpha, d_weightData, this->out_rows, d_outputGrad, this->out_rows,
&Cuda::beta, d_inputGrad, this->in_rows));
*/
this->_inputData[0]->print_grad("inputGrad:");
/*
if(this->_input->is_nan_grad()) {
cout << SLPROP_BASE(name) << " _input gradient nan ... " << endl;
Data<Dtype>::printConfig = 1;
this->_input->print_grad("deltaInput:");
Data<Dtype>::printConfig = 0;
exit(1);
}
*/
}
template FullyConnectedLayer<float>::~FullyConnectedLayer();
template void FullyConnectedLayer<float>::reshape();
template void FullyConnectedLayer<float>::update();
template void FullyConnectedLayer<float>::feedforward();
template void FullyConnectedLayer<float>::backpropagation();
/*
template void* FullyConnectedLayer<float>::initLayer();
template void FullyConnectedLayer<float>::destroyLayer(void* instancePtr);
template void FullyConnectedLayer<float>::setInOutTensor(void* instancePtr, void* tensorPtr,
bool isInput, int index);
template bool FullyConnectedLayer<float>::allocLayerTensors(void* instancePtr);
template void FullyConnectedLayer<float>::forwardTensor(void* instancePtr, int miniBatchIdx);
template void FullyConnectedLayer<float>::backwardTensor(void* instancePtr);
template void FullyConnectedLayer<float>::learnTensor(void* instancePtr);
*/
|
2a622536e002ecdf0ef8319025314b161f8a55d6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
#define TILE_SIZE 16
unsigned char *d_input;
unsigned char *d_output;
float *d_filter;
inline hipError_t checkCuda(hipError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
exit(-1);
}
#endif
return result;
}
__global__ void gaussianBlur(unsigned char *input,
unsigned char *output,
unsigned int rows,
unsigned int cols,
float *filter,
int filter_width) {
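// Each thread produces one output pixel as the weighted sum of its filter_width x filter_width
// neighborhood. Neighbors that fall outside the image are skipped (no renormalization), so
// border pixels receive a slightly smaller weighted sum.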
int x = blockIdx.x * TILE_SIZE + threadIdx.x;
int y = blockIdx.y * TILE_SIZE + threadIdx.y;
if (x >= cols || y >= rows)
return;
int index = y * cols + x;
// Blur algorithm using weighted average (recommended)
float result = 0.0;
for (int fr = 0; fr < filter_width; fr++) {
for (int fc = 0; fc < filter_width; fc++) {
int cur_row = y + fr - filter_width / 2;
int cur_col = x + fc - filter_width / 2;
// skip neighbors that fall outside the image
if ((cur_row > -1) && (cur_row < rows) &&
(cur_col > -1) && (cur_col < cols)) {
int filter_id = fr * filter_width + fc;
result += input[cur_row * cols + cur_col] * filter[filter_id];
}
}
}
output[index] = result;
}
void imageBlur (unsigned char* h_input,
unsigned char* h_output,
unsigned int rows,
unsigned int cols,
float* h_filter,
int filter_width) {
// block and grid size
int gridX = 1 + ((cols - 1) / TILE_SIZE);
int gridY = 1 + ((rows - 1) / TILE_SIZE);
dim3 dimGrid(gridX, gridY);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
// allocate memory and copy to GPU
int size = rows * cols;
checkCuda(hipMalloc((void**)&d_input, size * sizeof(unsigned char)));
checkCuda(hipMalloc((void**)&d_output, size * sizeof(unsigned char)));
checkCuda(hipMalloc((void**)&d_filter, filter_width * filter_width * sizeof(float)));
checkCuda(hipMemset(d_output, 0, size * sizeof(unsigned char)));
checkCuda(hipMemcpy(d_input, h_input, size * sizeof(unsigned char), hipMemcpyHostToDevice));
checkCuda(hipMemcpy(d_filter, h_filter, filter_width * filter_width * sizeof(float), hipMemcpyHostToDevice));
//kernel call
hipLaunchKernelGGL(( gaussianBlur), dim3(dimGrid), dim3(dimBlock), 0, 0, d_input, d_output, rows, cols, d_filter, filter_width);
//copy output to host
checkCuda(hipMemcpy(h_output, d_output, size * sizeof(unsigned char), hipMemcpyDeviceToHost));
// free memory
checkCuda(hipFree(d_input));
checkCuda(hipFree(d_output));
checkCuda(hipFree(d_filter));
}
|
2a622536e002ecdf0ef8319025314b161f8a55d6.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#define TILE_SIZE 16
unsigned char *d_input;
unsigned char *d_output;
float *d_filter;
inline cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
exit(-1);
}
#endif
return result;
}
__global__ void gaussianBlur(unsigned char *input,
unsigned char *output,
unsigned int rows,
unsigned int cols,
float *filter,
int filter_width) {
int x = blockIdx.x * TILE_SIZE + threadIdx.x;
int y = blockIdx.y * TILE_SIZE + threadIdx.y;
if (x >= cols || y >= rows)
return;
int index = y * cols + x;
// Blur algorithm using weighted average (recommended)
float result = 0.0;
for (int fr = 0; fr < filter_width; fr++) {
for (int fc = 0; fc < filter_width; fc++) {
int cur_row = y + fr - filter_width / 2;
int cur_col = x + fc - filter_width / 2;
// skip neighbors that fall outside the image
if ((cur_row > -1) && (cur_row < rows) &&
(cur_col > -1) && (cur_col < cols)) {
int filter_id = fr * filter_width + fc;
result += input[cur_row * cols + cur_col] * filter[filter_id];
}
}
}
output[index] = result;
}
void imageBlur (unsigned char* h_input,
unsigned char* h_output,
unsigned int rows,
unsigned int cols,
float* h_filter,
int filter_width) {
// block and grid size
int gridX = 1 + ((cols - 1) / TILE_SIZE);
int gridY = 1 + ((rows - 1) / TILE_SIZE);
dim3 dimGrid(gridX, gridY);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
// allocate memory and copy to GPU
int size = rows * cols;
checkCuda(cudaMalloc((void**)&d_input, size * sizeof(unsigned char)));
checkCuda(cudaMalloc((void**)&d_output, size * sizeof(unsigned char)));
checkCuda(cudaMalloc((void**)&d_filter, filter_width * filter_width * sizeof(float)));
checkCuda(cudaMemset(d_output, 0, size * sizeof(unsigned char)));
checkCuda(cudaMemcpy(d_input, h_input, size * sizeof(unsigned char), cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(d_filter, h_filter, filter_width * filter_width * sizeof(float), cudaMemcpyHostToDevice));
//kernel call
gaussianBlur<<<dimGrid, dimBlock>>>(d_input, d_output, rows, cols, d_filter, filter_width);
//copy output to host
checkCuda(cudaMemcpy(h_output, d_output, size * sizeof(unsigned char), cudaMemcpyDeviceToHost));
// free memory
checkCuda(cudaFree(d_input));
checkCuda(cudaFree(d_output));
checkCuda(cudaFree(d_filter));
}
|
c0ab4210a51b49bd2cb8a53bd4e31933a68e49c7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "normal_eqs_disparity_multicam_GPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_CD = NULL;
hipMalloc(&d_CD, XSIZE*YSIZE);
float *d_disparity_compact = NULL;
hipMalloc(&d_disparity_compact, XSIZE*YSIZE);
float4 *d_Zbuffer_normals_compact = NULL;
hipMalloc(&d_Zbuffer_normals_compact, XSIZE*YSIZE);
int *d_ind_disparity_Zbuffer = NULL;
hipMalloc(&d_ind_disparity_Zbuffer, XSIZE*YSIZE);
const float *d_focal_length = NULL;
hipMalloc(&d_focal_length, XSIZE*YSIZE);
const float *d_nodal_point_x = NULL;
hipMalloc(&d_nodal_point_x, XSIZE*YSIZE);
const float *d_nodal_point_y = NULL;
hipMalloc(&d_nodal_point_y, XSIZE*YSIZE);
const float *d_baseline = NULL;
hipMalloc(&d_baseline, XSIZE*YSIZE);
const int *d_n_cols = NULL;
hipMalloc(&d_n_cols, XSIZE*YSIZE);
const int *d_n_values_disparity = NULL;
hipMalloc(&d_n_values_disparity, XSIZE*YSIZE);
const int *d_start_ind_disparity = NULL;
hipMalloc(&d_start_ind_disparity, XSIZE*YSIZE);
const int *d_pixel_ind_offset = NULL;
hipMalloc(&d_pixel_ind_offset, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
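// round the problem size up to the next multiple of the block dimensions
// so the grid below covers the whole matrix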
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( normal_eqs_disparity_multicam_GPU), dim3(gridBlock),dim3(threadBlock), 0, 0, d_CD,d_disparity_compact,d_Zbuffer_normals_compact,d_ind_disparity_Zbuffer,d_focal_length,d_nodal_point_x,d_nodal_point_y,d_baseline,d_n_cols,d_n_values_disparity,d_start_ind_disparity,d_pixel_ind_offset);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( normal_eqs_disparity_multicam_GPU), dim3(gridBlock),dim3(threadBlock), 0, 0, d_CD,d_disparity_compact,d_Zbuffer_normals_compact,d_ind_disparity_Zbuffer,d_focal_length,d_nodal_point_x,d_nodal_point_y,d_baseline,d_n_cols,d_n_values_disparity,d_start_ind_disparity,d_pixel_ind_offset);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( normal_eqs_disparity_multicam_GPU), dim3(gridBlock),dim3(threadBlock), 0, 0, d_CD,d_disparity_compact,d_Zbuffer_normals_compact,d_ind_disparity_Zbuffer,d_focal_length,d_nodal_point_x,d_nodal_point_y,d_baseline,d_n_cols,d_n_values_disparity,d_start_ind_disparity,d_pixel_ind_offset);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
c0ab4210a51b49bd2cb8a53bd4e31933a68e49c7.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "normal_eqs_disparity_multicam_GPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_CD = NULL;
cudaMalloc(&d_CD, XSIZE*YSIZE);
float *d_disparity_compact = NULL;
cudaMalloc(&d_disparity_compact, XSIZE*YSIZE);
float4 *d_Zbuffer_normals_compact = NULL;
cudaMalloc(&d_Zbuffer_normals_compact, XSIZE*YSIZE);
int *d_ind_disparity_Zbuffer = NULL;
cudaMalloc(&d_ind_disparity_Zbuffer, XSIZE*YSIZE);
const float *d_focal_length = NULL;
cudaMalloc(&d_focal_length, XSIZE*YSIZE);
const float *d_nodal_point_x = NULL;
cudaMalloc(&d_nodal_point_x, XSIZE*YSIZE);
const float *d_nodal_point_y = NULL;
cudaMalloc(&d_nodal_point_y, XSIZE*YSIZE);
const float *d_baseline = NULL;
cudaMalloc(&d_baseline, XSIZE*YSIZE);
const int *d_n_cols = NULL;
cudaMalloc(&d_n_cols, XSIZE*YSIZE);
const int *d_n_values_disparity = NULL;
cudaMalloc(&d_n_values_disparity, XSIZE*YSIZE);
const int *d_start_ind_disparity = NULL;
cudaMalloc(&d_start_ind_disparity, XSIZE*YSIZE);
const int *d_pixel_ind_offset = NULL;
cudaMalloc(&d_pixel_ind_offset, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
normal_eqs_disparity_multicam_GPU<<<gridBlock,threadBlock>>>(d_CD,d_disparity_compact,d_Zbuffer_normals_compact,d_ind_disparity_Zbuffer,d_focal_length,d_nodal_point_x,d_nodal_point_y,d_baseline,d_n_cols,d_n_values_disparity,d_start_ind_disparity,d_pixel_ind_offset);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
normal_eqs_disparity_multicam_GPU<<<gridBlock,threadBlock>>>(d_CD,d_disparity_compact,d_Zbuffer_normals_compact,d_ind_disparity_Zbuffer,d_focal_length,d_nodal_point_x,d_nodal_point_y,d_baseline,d_n_cols,d_n_values_disparity,d_start_ind_disparity,d_pixel_ind_offset);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
normal_eqs_disparity_multicam_GPU<<<gridBlock,threadBlock>>>(d_CD,d_disparity_compact,d_Zbuffer_normals_compact,d_ind_disparity_Zbuffer,d_focal_length,d_nodal_point_x,d_nodal_point_y,d_baseline,d_n_cols,d_n_values_disparity,d_start_ind_disparity,d_pixel_ind_offset);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
bc6456f788c3a071c4c1f30f91fd35189e6859bf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/focal_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void FocalLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts, float alpha_, float gamma_) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
//loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
// Dtype(FLT_MIN)));
Dtype pt = prob_data[n * dim + label_value * spatial_dim + s];
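// focal loss per sample: FL(pt) = -alpha * (1 - pt)^gamma * log(pt),
// where pt is the softmax probability of the ground-truth class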
loss[index] = -alpha_ * powf(1 - pt, gamma_) * log(max(pt, Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( FocalLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts,
alpha_, gamma_);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
Dtype normalizer = LossLayer<Dtype>::GetNormalizer(
normalization_, outer_num_, inner_num_, valid_count);
top[0]->mutable_cpu_data()[0] = loss / normalizer;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void FocalLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts, float alpha_, float gamma_) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
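// Gradient of FL(pt) = -alpha * (1 - pt)^gamma * log(pt) with respect to the softmax inputs,
// using dpt/dz_t = pt * (1 - pt) for the target class and dpt/dz_c = -pt * pc for the others
// (pt and pc are softmax probabilities; prob_data was copied into bottom_diff before launch).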
Dtype pt = bottom_diff[n * dim + label_value * spatial_dim + s];
for (int c = 0; c < channels; ++c) {
if(c == label_value){
bottom_diff[n * dim + c * spatial_dim + s] = alpha_ *
powf(1 - pt, gamma_) * (gamma_ * pt * log(max(pt, Dtype(FLT_MIN))) + pt - 1);
}
else{
Dtype pc = bottom_diff[n * dim + c * spatial_dim + s];
bottom_diff[n * dim + c * spatial_dim + s] = alpha_ *
(powf(1 - pt, gamma_ - 1) * (-gamma_ * log(max(pt, Dtype(FLT_MIN))) * pt * pc) +
powf(1 - pt, gamma_) * pc);
}
}
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( FocalLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts,
alpha_, gamma_);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
Dtype normalizer = LossLayer<Dtype>::GetNormalizer(
normalization_, outer_num_, inner_num_, valid_count);
const Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer;
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(FocalLossLayer);
} // namespace caffe
|
bc6456f788c3a071c4c1f30f91fd35189e6859bf.cu
|
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/focal_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void FocalLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts, float alpha_, float gamma_) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
//loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
// Dtype(FLT_MIN)));
Dtype pt = prob_data[n * dim + label_value * spatial_dim + s];
loss[index] = -alpha_ * powf(1 - pt, gamma_) * log(max(pt, Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
FocalLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts,
alpha_, gamma_);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
Dtype normalizer = LossLayer<Dtype>::GetNormalizer(
normalization_, outer_num_, inner_num_, valid_count);
top[0]->mutable_cpu_data()[0] = loss / normalizer;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void FocalLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts, float alpha_, float gamma_) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
Dtype pt = bottom_diff[n * dim + label_value * spatial_dim + s];
for (int c = 0; c < channels; ++c) {
if(c == label_value){
bottom_diff[n * dim + c * spatial_dim + s] = alpha_ *
powf(1 - pt, gamma_) * (gamma_ * pt * log(max(pt, Dtype(FLT_MIN))) + pt - 1);
}
else{
Dtype pc = bottom_diff[n * dim + c * spatial_dim + s];
bottom_diff[n * dim + c * spatial_dim + s] = alpha_ *
(powf(1 - pt, gamma_ - 1) * (-gamma_ * log(max(pt, Dtype(FLT_MIN))) * pt * pc) +
powf(1 - pt, gamma_) * pc);
}
}
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
FocalLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts,
alpha_, gamma_);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
Dtype normalizer = LossLayer<Dtype>::GetNormalizer(
normalization_, outer_num_, inner_num_, valid_count);
const Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer;
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(FocalLossLayer);
} // namespace caffe
|
a49c010c75efde01536e0a177cb49dcafdff822e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from ztrtri_lower.cu normal z -> s, Fri Jul 18 17:34:13 2014
@author Peng Du
@author Tingxing Dong
@author Mark Gates
This file implements lower case, and is called by strtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "common_magma.h"
#include "strtri.h"
/*
This inverts the diagonal IB by IB inner blocks of A,
and stores the results in d_dinvA.
Each thread block with IB threads does one inner block.
Each thread deals with one row of the inner block.
*/
__global__ void
strtri_diag_kernel_lower(
magma_diag_t diag, int n, const float *A, int lda, float *d_dinvA)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
int blk_ind = bx*IB;
int ind = blk_ind + tx;
A += blk_ind + blk_ind*lda; // A(blk_ind, blk_ind)
// TODO sB should be [IB][IB+1] to avoid bank conflicts, right?
__shared__ float sB[IB*IB];
float y_tx;
// load lower triangle of inner block of A; zero upper triangle & outside matrix
#pragma unroll
for( int j=0; j < IB; j++ ) {
if (tx >= j && ind < n) {
sB[tx + j*IB] = A[tx + j*lda];
}
else {
sB[tx + j*IB] = MAGMA_S_ZERO;
}
}
__syncthreads();
// invert the diagonal
if (diag == MagmaUnit) {
sB[tx + tx*IB] = MAGMA_S_ONE;
}
else {
if ( sB[tx + tx*IB] == MAGMA_S_ZERO ) { // singular or outside matrix
sB[tx + tx*IB] = MAGMA_S_ONE;
}
else {
sB[tx + tx*IB] = MAGMA_S_ONE / sB[tx + tx*IB];
}
}
// compute elements j+1:IB-1 of j-th column.
for( int j=IB-2; j >= 0; j-- ) {
if ( tx > j ) {
// trmv: y = sB(j+1:IB-1, j+1:IB-1) * sB(j+1:IB-1, j)
// each thread sums one element, y[tx]
y_tx = MAGMA_S_ZERO;
#pragma unroll
for( int k=j+1; k < IB; k++ )
y_tx += sB[tx + k*IB] * sB[k + j*IB];
// scal: sB(j+1:IB-1, j) = -sB(j,j) * y
sB[tx + j*IB] = -sB[j + j*IB] * y_tx;
}
__syncthreads();
}
// go to the (bx / ib_per_NB) outer NB*NB block,
// then the (bx % ib_per_NB) inner IB*IB block inside that.
int ib_per_NB = NB/IB;
d_dinvA += (bx / ib_per_NB)*NB*NB
+ (bx % ib_per_NB)*(NB*IB + IB);
// write result
#pragma unroll
for( int j=0; j < IB; j++ ) {
d_dinvA[tx + j*NB] = sB[tx + j*IB];
}
}
/*
Let A be an NB*NB lower triangular matrix, and B its inverse.
Then the block decomposition
[ A11 0 ] * [ B11 0 ] = [ I 0 ]
[ A21 A22 ] [ B21 B22 ] [ 0 I ]
yields
A11*B11 = I ==> B11 = A11^{-1},
A22*B22 = I ==> B22 = A22^{-1},
A21*B11 + A22*B21 = 0 ==> B21 = -A22^{-1}*A21*B11 = -B22*A21*B11.
strtri_diag_kernel inverts A11 and A22.
triple_sgemm16 routines multiply:
part 1: B21 = A21 * B11,
part 2: B21 = -B22 * B21.
At this level, inner block is jb=16, with one 4x4 thread block per inner block.
Each submatrix Aij and Bij is jb x jb.
The submatrix dimension is multiplied by 2 at each level,
so the next level is jb*2 = 32.
A "page" is the next bigger block, here jb*2=32,
[ B11 0 ]
which contains [ B21 B22 ].
Outer blocks are NB x NB.
A21 may have < jb rows, but is guaranteed to have jb cols since A22 is on
the right. This makes a single check easy to do.
B is stored in workspace that is a full multiple of NB x NB; no checks needed.
*/
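/*
    A concrete instance of the bookkeeping above (a sketch; the actual NB and IB come from
    strtri.h): e.g., if NB were 128 and jb = 16, a page is 32 x 32, pages_per_NB = NB/(jb*2) = 4,
    and the (page % pages_per_NB) offset used in the kernels below steps d_dinvA down the
    diagonal of the current NB x NB workspace block in strides of jb*2*NB + jb*2.
*/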
/*
* B21 = A21 * B11
*/
__global__ void
triple_sgemm16_part1_lower(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
// emulate 3D grid: NX * (NY*npages)
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by*16;
const int id = tx + ty*blockDim.x;
const int ind = page*jb*2 + jb + ibx + id;
__shared__ float sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B21 = A21 * B11
const float *A, *B;
float *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb; // A21
B = d_dinvA; // B11
C = d_dinvA + jb; // B21
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const float *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
float rA[4];
do {
// TODO this won't coalesce, will it? unless NX=32 (or maybe 16 with floats, or 8 with float-real)
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 4 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
if ( ind < n ) {
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
}
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// TODO instead of writing result, copy it to sB and do part 2.
// Would only work for jb=16, because only then does rC fit into sB.
// If sB were [NT][16+], then rC would fit into sB.
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B21 = -B22 * B21
*/
__global__ void
triple_sgemm16_part2_lower(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by*16;
const int id = tx + ty*blockDim.x;
const int ind = page*jb*2 + jb + ibx + id;
__shared__ float sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B21 = -B22 * B21
const float *A, *B;
float *C;
int lda = NB; // shadows lda argument
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb; // B21
B = C; // B21, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
// TODO factor this out:
// gemm16<NX, NY> computes NT x 16 block of C:
// C(1:nt, 1:16) = A(1:nt, 1:jb) * B(1:jb, 1:16)
// where NT = NX * NY.
// part 1: gemm16<4,4>( /*NT, 16,*/ jb, 1, A21, lda, B11, NB, /*0*/, B21, NB, n, ind, tx, ty );
// part 2: gemm16<4,4>( /*NT, 16,*/ jb, -1, B22, NB, B21, NB, /*0*/, B21, NB, n, ind, tx, ty ); // okay for C to overwrite B
const float *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
float rA[4];
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 4 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
if ( ind < n ) {
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
}
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B21 = A21 * B11
*/
__global__ void
triple_sgemm32_part1_lower(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by*16;
const int id = tx + ty*blockDim.x;
const int ind = page*jb*2 + jb + ibx + id;
__shared__ float sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B21 = A21 * B11
const float *A, *B;
float *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb; // A21
B = d_dinvA; // B11
C = d_dinvA + jb; // B21
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const float *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
float rA[4];
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 8 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
if ( ind < n ) {
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
}
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B21 = -B22 * B21
*/
__global__ void
triple_sgemm32_part2_lower(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by*16;
const int id = tx + ty*blockDim.x;
const int ind = page*jb*2 + jb + ibx + id;
__shared__ float sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B21 = -B22 * B21
const float *A, *B;
float *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb; // B21
B = C; // B21, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const float *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
float rA[4];
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 8 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
if ( ind < n ) {
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
}
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B21 = A21 * B11
*/
__global__ void
triple_sgemm64_part1_lower(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
const int ind = page*jb*2 + jb + ibx + id;
__shared__ float sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B21 = A21 * B11
const float *A, *B;
float *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb; // A21
B = d_dinvA; // B11
C = d_dinvA + jb; // B21
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const float *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
float rA[4];
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
if ( ind < n ) {
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
}
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B21 = -B22 * B21
*/
__global__ void
triple_sgemm64_part2_lower(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
const int ind = page*jb*2 + jb + ibx + id;
__shared__ float sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B21 = -B22 * B21
const float *A, *B;
float *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb; // B21
B = C; // B21, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const float *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
float rA[4];
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
if ( ind < n ) {
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
}
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B21 = A21 * B11
*/
__global__ void
triple_sgemm_above64_part1_lower(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
const int ind = page*jb*2 + jb + ibx + id;
__shared__ float sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B21 = A21 * B11
const float *A, *B;
float *C;
int ldb = NB;
int ldc = NB;
// For jb > 64, we process B21 as gridDim.x sections of 64 rows each, with gridDim.x > 1.
// Each section needs all of the B matrix, so C cannot overwrite B.
// Therefore, store B21 temporarily in the previously unused B12 matrix
// (i.e., above diagonal), then in part 3, zero out B12.
//
// Kernels with jb <= 64 don't have this problem, because only the
// NT x 16 section of C that overwrites the same section of B depends
// on that section of B.
//
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb; // A21
B = d_dinvA; // B11
C = d_dinvA + jb*NB; // B21; write to B12 temp location
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const float *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
float rA[4];
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
if ( ind < n ) {
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
}
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B21 = -B22 * B21
*/
__global__ void
triple_sgemm_above64_part2_lower(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
const int ind = page*jb*2 + jb + ibx + id;
__shared__ float sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B21 = -B22 * B21
const float *A, *B;
float *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA + jb*NB + jb; // B22
B = d_dinvA + jb*NB; // B21, read from B12 temp location
C = d_dinvA + jb; // B21
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const float *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
float rA[4];
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
if ( ind < n ) {
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
}
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* zero out B12 temp location
*/
__global__ void
triple_sgemm_above64_part3_lower(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part three---------------------------//
{
// zero out B12 temp location
float *B12;
int ldb = NB;
B12 = d_dinvA + jb*NB;
B12 += ibx + id + iby*ldb;
#pragma unroll
for( int i = 0; i < 16; i++ ) {
B12[i*ldb] = MAGMA_S_ZERO;
}
}
}
|
a49c010c75efde01536e0a177cb49dcafdff822e.cu
|
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from ztrtri_lower.cu normal z -> s, Fri Jul 18 17:34:13 2014
@author Peng Du
@author Tingxing Dong
@author Mark Gates
This file implements lower case, and is called by strtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "common_magma.h"
#include "strtri.h"
/*
This inverts the diagonal IB by IB inner blocks of A,
and stores the results in d_dinvA.
Each thread block with IB threads does one inner block.
Each thread deals with one row of the inner block.
*/
__global__ void
strtri_diag_kernel_lower(
magma_diag_t diag, int n, const float *A, int lda, float *d_dinvA)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
int blk_ind = bx*IB;
int ind = blk_ind + tx;
A += blk_ind + blk_ind*lda; // A(blk_ind, blk_ind)
// TODO sB should be [IB][IB+1] to avoid bank conflicts, right?
__shared__ float sB[IB*IB];
float y_tx;
// load lower triangle of inner block of A; zero upper triangle & outside matrix
#pragma unroll
for( int j=0; j < IB; j++ ) {
if (tx >= j && ind < n) {
sB[tx + j*IB] = A[tx + j*lda];
}
else {
sB[tx + j*IB] = MAGMA_S_ZERO;
}
}
__syncthreads();
// invert the diagonal
if (diag == MagmaUnit) {
sB[tx + tx*IB] = MAGMA_S_ONE;
}
else {
if ( sB[tx + tx*IB] == MAGMA_S_ZERO ) { // singular or outside matrix
sB[tx + tx*IB] = MAGMA_S_ONE;
}
else {
sB[tx + tx*IB] = MAGMA_S_ONE / sB[tx + tx*IB];
}
}
// compute elements j+1:IB-1 of j-th column.
for( int j=IB-2; j >= 0; j-- ) {
if ( tx > j ) {
// trmv: y = sB(j+1:IB-1, j+1:IB-1) * sB(j+1:IB-1, j)
// each thread sums one element, y[tx]
y_tx = MAGMA_S_ZERO;
#pragma unroll
for( int k=j+1; k < IB; k++ )
y_tx += sB[tx + k*IB] * sB[k + j*IB];
// scal: sB(j+1:IB-1, j) = -sB(j,j) * y
sB[tx + j*IB] = -sB[j + j*IB] * y_tx;
}
__syncthreads();
}
// go to the (bx / ib_per_NB) outer NB*NB block,
// then the (bx % ib_per_NB) inner IB*IB block inside that.
int ib_per_NB = NB/IB;
d_dinvA += (bx / ib_per_NB)*NB*NB
+ (bx % ib_per_NB)*(NB*IB + IB);
// write result
#pragma unroll
for( int j=0; j < IB; j++ ) {
d_dinvA[tx + j*NB] = sB[tx + j*IB];
}
}
/*
Let A be an NB*NB lower triangular matrix, and B its inverse.
Then the block decomposition
[ A11 0 ] * [ B11 0 ] = [ I 0 ]
[ A21 A22 ] [ B21 B22 ] [ 0 I ]
yields
A11*B11 = I ==> B11 = A11^{-1},
A22*B22 = I ==> B22 = A22^{-1},
A21*B11 + A22*B21 = 0 ==> B21 = -A22^{-1}*A21*B11 = -B22*A21*B11.
strtri_diag_kernel inverts A11 and A22.
triple_sgemm16 routines multiply:
part 1: B21 = A21 * B11,
part 2: B21 = -B22 * B21.
At this level, inner block is jb=16, with one 4x4 thread block per inner block.
Each submatrix Aij and Bij is jb x jb.
The submatrix dimension is multiplied by 2 at each level,
so the next level is jb*2 = 32.
A "page" is the next bigger block, here jb*2=32,
[ B11 0 ]
which contains [ B21 B22 ].
Outer blocks are NB x NB.
A21 may have < jb rows, but is guaranteed to have jb cols since A22 is on
the right. This makes a single check easy to do.
B is stored in workspace that is a full multiple of NB x NB; no checks needed.
*/
/*
* B21 = A21 * B11
*/
__global__ void
triple_sgemm16_part1_lower(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
// emulate 3D grid: NX * (NY*npages)
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by*16;
const int id = tx + ty*blockDim.x;
const int ind = page*jb*2 + jb + ibx + id;
__shared__ float sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B21 = A21 * B11
const float *A, *B;
float *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb; // A21
B = d_dinvA; // B11
C = d_dinvA + jb; // B21
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const float *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
float rA[4];
do {
// TODO this won't coalesce, will it? unless NX=32 (or maybe 16 with floats, or 8 with float-real)
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 4 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
if ( ind < n ) {
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
}
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// TODO instead of writing result, copy it to sB and do part 2.
// Would only work for jb=16, because only then does rC fit into sB.
// If sB were [NT][16+], then rC would fit into sB.
// write NT x 16 result; each thread writes one 1x16 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B21 = -B22 * B21
*/
__global__ void
triple_sgemm16_part2_lower(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by*16;
const int id = tx + ty*blockDim.x;
const int ind = page*jb*2 + jb + ibx + id;
__shared__ float sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B21 = -B22 * B21
const float *A, *B;
float *C;
int lda = NB; // shadows lda argument
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb; // B21
B = C; // B21, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
// TODO factor this out:
// gemm16<NX, NY> computes NT x 16 block of C:
// C(1:nt, 1:16) = A(1:nt, 1:jb) * B(1:jb, 1:16)
// where NT = NX * NY.
// part 1: gemm16<4,4>( /*NT, 16,*/ jb, 1, A21, lda, B11, NB, /*0*/, B21, NB, n, ind, tx, ty );
// part 2: gemm16<4,4>( /*NT, 16,*/ jb, -1, B22, NB, B21, NB, /*0*/, B21, NB, n, ind, tx, ty ); // okay for C to overwrite B
const float *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
float rA[4];
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 4 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
if ( ind < n ) {
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
}
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 1x16 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B21 = A21 * B11
*/
__global__ void
triple_sgemm32_part1_lower(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by*16;
const int id = tx + ty*blockDim.x;
const int ind = page*jb*2 + jb + ibx + id;
__shared__ float sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B21 = A21 * B11
const float *A, *B;
float *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb; // A21
B = d_dinvA; // B11
C = d_dinvA + jb; // B21
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const float *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
float rA[4];
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 8 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
if ( ind < n ) {
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
}
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 1x16 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B21 = -B22 * B21
*/
__global__ void
triple_sgemm32_part2_lower(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by*16;
const int id = tx + ty*blockDim.x;
const int ind = page*jb*2 + jb + ibx + id;
__shared__ float sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B21 = -B22 * B21
const float *A, *B;
float *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb; // B21
B = C; // B21, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const float *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
float rA[4];
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 8 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
if ( ind < n ) {
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
}
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 1x16 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B21 = A21 * B11
*/
__global__ void
triple_sgemm64_part1_lower(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
const int ind = page*jb*2 + jb + ibx + id;
__shared__ float sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B21 = A21 * B11
const float *A, *B;
float *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb; // A21
B = d_dinvA; // B11
C = d_dinvA + jb; // B21
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const float *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
float rA[4];
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
if ( ind < n ) {
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
}
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 1x16 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B21 = -B22 * B21
*/
__global__ void
triple_sgemm64_part2_lower(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
const int ind = page*jb*2 + jb + ibx + id;
__shared__ float sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B21 = -B22 * B21
const float *A, *B;
float *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb; // B21
B = C; // B21, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const float *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
float rA[4];
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
if ( ind < n ) {
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
}
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 1x16 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B21 = A21 * B11
*/
__global__ void
triple_sgemm_above64_part1_lower(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
const int ind = page*jb*2 + jb + ibx + id;
__shared__ float sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B21 = A21 * B11
const float *A, *B;
float *C;
int ldb = NB;
int ldc = NB;
// For jb > 64, we process B21 as gridDim.x sections of 64 rows each, with gridDim.x > 1.
// Each section needs all of the B matrix, so C cannot overwrite B.
// Therefore, store B21 temporarily in the previously unused B12 matrix
// (i.e., above diagonal), then in part 3, zero out B12.
//
// Kernels with jb <= 64 don't have this problem, because only the
// NT x 16 section of C that overwrites the same section of B depends
// on that section of B.
//
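        // Concretely, one (2*jb) x (2*jb) page of d_dinvA (column-major, ld = NB)
        // is laid out as
        //     [ B11 | B12 (temp) ]
        //     [ B21 | B22        ]
        // part 1 writes A21*B11 into the B12 slot, part 2 reads it back and
        // writes -B22*(A21*B11) into B21, and part 3 zeroes the B12 slot again.
        //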
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb; // A21
B = d_dinvA; // B11
C = d_dinvA + jb*NB; // B21; write to B12 temp location
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const float *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
float rA[4];
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
if ( ind < n ) {
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
}
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 1x16 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B21 = -B22 * B21
*/
__global__ void
triple_sgemm_above64_part2_lower(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
const int ind = page*jb*2 + jb + ibx + id;
__shared__ float sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B21 = -B22 * B21
const float *A, *B;
float *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA + jb*NB + jb; // B22
B = d_dinvA + jb*NB; // B21, read from B12 temp location
C = d_dinvA + jb; // B21
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const float *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
float rA[4];
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
if ( ind < n ) {
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
}
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 1x16 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* zero out B12 temp location
*/
__global__ void
triple_sgemm_above64_part3_lower(
int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part three---------------------------//
{
// zero out B12 temp location
float *B12;
int ldb = NB;
B12 = d_dinvA + jb*NB;
B12 += ibx + id + iby*ldb;
#pragma unroll
for( int i = 0; i < 16; i++ ) {
B12[i*ldb] = MAGMA_S_ZERO;
}
}
}
|
0a51163f2a6e246af665b96dc4a9a3bb47e857ab.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// --------------------------------------------------------
// Multitask Network Cascade
// Modified from MATLAB Faster R-CNN (https://github.com/shaoqingren/faster_rcnn)
// Copyright (c) 2016, Haozhi Qi
// Licensed under The MIT License [see LICENSE for details]
// --------------------------------------------------------
#include "gpu_nms.hpp"
#include <vector>
#include <iostream>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
std::cout << hipGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
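// nms_kernel: each (col_start, row_start) block stages a chunk of up to
// threadsPerBlock (= 64) "column" boxes in shared memory and compares them
// against its "row" boxes; each thread handles one row box and packs its
// per-column suppression decisions (IoU > threshold) into the bits of a
// single unsigned long long, written to dev_mask[row_box * col_blocks + col_start].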
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _set_device(int device_id) {
int current_device;
CUDA_CHECK(hipGetDevice(&current_device));
//std::cout << current_device << std::endl;
if (current_device == device_id) {
return;
}
// The call to hipSetDevice must come before any calls to Get, which
// may perform initialization using the GPU.
CUDA_CHECK(hipSetDevice(device_id));
}
void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
int boxes_dim, float nms_overlap_thresh, int device_id) {
//_set_device(device_id);
float* boxes_dev = NULL;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
CUDA_CHECK(hipMalloc(&boxes_dev,
boxes_num * boxes_dim * sizeof(float)));
CUDA_CHECK(hipMemcpy(boxes_dev,
boxes_host,
boxes_num * boxes_dim * sizeof(float),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CUDA_CHECK(hipMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
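  // Greedy suppression on the host (boxes are assumed to arrive sorted by
  // descending score): a box is kept only if no previously kept box has set
  // its bit in remv; keeping a box ORs its overlap mask into remv so that the
  // boxes it suppresses are skipped on later iterations.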
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
CUDA_CHECK(hipFree(boxes_dev));
CUDA_CHECK(hipFree(mask_dev));
}
|
0a51163f2a6e246af665b96dc4a9a3bb47e857ab.cu
|
// --------------------------------------------------------
// Multitask Network Cascade
// Modified from MATLAB Faster R-CNN (https://github.com/shaoqingren/faster_rcnn)
// Copyright (c) 2016, Haozhi Qi
// Licensed under The MIT License [see LICENSE for details]
// --------------------------------------------------------
#include "gpu_nms.hpp"
#include <vector>
#include <iostream>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _set_device(int device_id) {
int current_device;
CUDA_CHECK(cudaGetDevice(&current_device));
//std::cout << current_device << std::endl;
if (current_device == device_id) {
return;
}
// The call to cudaSetDevice must come before any calls to Get, which
// may perform initialization using the GPU.
CUDA_CHECK(cudaSetDevice(device_id));
}
void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
int boxes_dim, float nms_overlap_thresh, int device_id) {
//_set_device(device_id);
float* boxes_dev = NULL;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
CUDA_CHECK(cudaMalloc(&boxes_dev,
boxes_num * boxes_dim * sizeof(float)));
CUDA_CHECK(cudaMemcpy(boxes_dev,
boxes_host,
boxes_num * boxes_dim * sizeof(float),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CUDA_CHECK(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
CUDA_CHECK(cudaFree(boxes_dev));
CUDA_CHECK(cudaFree(mask_dev));
}
|
f45baff206840b925586ab7cb46593049c1687a8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Colour Sine wave Kernel
// Based on kernal_colour in kernelVBO.cpp by Rob Farber
__global__ void kernel(float4* dVertexArray, uchar4 *dColorArray,
unsigned int width, unsigned int height, float time)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// Each thread is unique point (u,v) in interval [-1,1],[-1,1]
const float u = 2.0* (x/(float)width) - 1.0f;
const float v = 2.0* (y/(float)height) - 1.0f;
const float w = 0.5 * sinf(4.0*u + time) * cosf(4.0*v + time);
// Update vertex array for point
dVertexArray[y*width+x] = make_float4(u, w, v, 1.0f);
// Update colour array for point
dColorArray[y*width+x].w = 0;
dColorArray[y*width+x].x = 255.f *0.5*(1.f+sinf(w+x));
dColorArray[y*width+x].y = 255.f *0.5*(1.f+sinf(x)*cosf(y));
dColorArray[y*width+x].z = 255.f *0.5*(1.f+sinf(w+time/10.f));
}
extern "C" void launch_kernel(float4* dVertexArray, uchar4* dColourArray,
unsigned int width, unsigned int height, float time)
{
dim3 block(8, 8, 1);
dim3 grid(width / block.x, height / block.y, 1);
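  // Note: this integer division assumes width and height are multiples of the
  // 8x8 block size; any leftover right/bottom pixels would simply not be covered.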
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), 0, 0, dVertexArray, dColourArray, width, height, time);
}
|
f45baff206840b925586ab7cb46593049c1687a8.cu
|
// Colour Sine wave Kernel
// Based on kernal_colour in kernelVBO.cpp by Rob Farber
__global__ void kernel(float4* dVertexArray, uchar4 *dColorArray,
unsigned int width, unsigned int height, float time)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// Each thread is unique point (u,v) in interval [-1,1],[-1,1]
const float u = 2.0* (x/(float)width) - 1.0f;
const float v = 2.0* (y/(float)height) - 1.0f;
const float w = 0.5 * sinf(4.0*u + time) * cosf(4.0*v + time);
// Update vertex array for point
dVertexArray[y*width+x] = make_float4(u, w, v, 1.0f);
// Update colour array for point
dColorArray[y*width+x].w = 0;
dColorArray[y*width+x].x = 255.f *0.5*(1.f+sinf(w+x));
dColorArray[y*width+x].y = 255.f *0.5*(1.f+sinf(x)*cosf(y));
dColorArray[y*width+x].z = 255.f *0.5*(1.f+sinf(w+time/10.f));
}
extern "C" void launch_kernel(float4* dVertexArray, uchar4* dColourArray,
unsigned int width, unsigned int height, float time)
{
dim3 block(8, 8, 1);
dim3 grid(width / block.x, height / block.y, 1);
kernel<<< grid, block>>>(dVertexArray, dColourArray, width, height, time);
}
|
0c3c1b15d7c0a37c6342e9045b5b10661ba4b547.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cassert>
#include <cstring>
#include <iostream>
#include <string>
#include <hip/hip_runtime.h>
#include <cusolverDn.h>
#include "cusolver_utils.h"
int main(int argc, const char *argv[]) {
bool verbose = false;
// Matrix size
const int N = 1024;
// Number of right hand sides
const int nrhs = 1;
// Use double precision matrix and half precision factorization
typedef double T;
const cusolverPrecType_t matrix_precision = CUSOLVER_R_64F;
// make sure that you specify matrix precision that matches to the data type
assert(traits<T>::cusolver_precision_type == matrix_precision);
const cusolverPrecType_t compute_lower_precision = CUSOLVER_R_16F;
// Use GMRES refinement solver
const cusolverIRSRefinement_t refinement_solver = CUSOLVER_IRS_REFINE_GMRES;
T *hA;
cusolver_int_t lda;
T *hB;
cusolver_int_t ldb;
T *hX;
cusolver_int_t ldx;
hipStream_t stream;
hipEvent_t event_start, event_end;
hipsolverDnHandle_t handle;
cusolverDnIRSParams_t gesv_params;
cusolverDnIRSInfos_t gesv_info;
std::cout << "Generating matrix A on host..." << std::endl;
generate_random_matrix<T>(N, N, &hA, &lda);
std::cout << "make A diagonal dominant..." << std::endl;
make_diag_dominant_matrix<T>(N, N, hA, lda);
std::cout << "Generating matrix B on host..." << std::endl;
generate_random_matrix<T>(nrhs, N, &hB, &ldb);
std::cout << "Generating matrix X on host..." << std::endl;
generate_random_matrix<T>(nrhs, N, &hX, &ldx);
if (verbose) {
std::cout << "A: \n";
print_matrix(N, N, hA, lda);
std::cout << "B: \n";
print_matrix(nrhs, N, hB, ldb);
std::cout << "X: \n";
print_matrix(nrhs, N, hX, ldx);
}
std::cout << "Initializing CUDA..." << std::endl;
CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
CUDA_CHECK(hipEventCreate(&event_start));
CUDA_CHECK(hipEventCreate(&event_end));
CUSOLVER_CHECK(hipsolverDnCreate(&handle));
CUSOLVER_CHECK(hipsolverDnSetStream(handle, stream));
std::cout << "Setting up gesv() parameters..." << std::endl;
// create solver parameters
CUSOLVER_CHECK(cusolverDnIRSParamsCreate(&gesv_params));
// set matrix precision and factorization precision
CUSOLVER_CHECK(cusolverDnIRSParamsSetSolverPrecisions(gesv_params, matrix_precision,
compute_lower_precision));
// set refinement solver
CUSOLVER_CHECK(cusolverDnIRSParamsSetRefinementSolver(gesv_params, refinement_solver));
// create solve info structure
CUSOLVER_CHECK(cusolverDnIRSInfosCreate(&gesv_info));
// matrix on device
T *dA;
cusolver_int_t ldda = ALIGN_TO(N * sizeof(T), device_alignment) / sizeof(T);
// right hand side on device
T *dB;
cusolver_int_t lddb = ALIGN_TO(N * sizeof(T), device_alignment) / sizeof(T);
// solution on device
T *dX;
cusolver_int_t lddx = ALIGN_TO(N * sizeof(T), device_alignment) / sizeof(T);
// pivot sequence on device
cusolver_int_t *dipiv;
// info indicator on device
cusolver_int_t *dinfo;
// work buffer
void *dwork;
// size of work buffer
size_t dwork_size;
// number of refinement iterations returned by solver
cusolver_int_t iter;
std::cout << "Allocating memory on device..." << std::endl;
// allocate data
CUDA_CHECK(hipMalloc(&dA, ldda * N * sizeof(T)));
CUDA_CHECK(hipMalloc(&dB, lddb * nrhs * sizeof(T)));
CUDA_CHECK(hipMalloc(&dX, lddx * nrhs * sizeof(T)));
CUDA_CHECK(hipMalloc(&dipiv, N * sizeof(cusolver_int_t)));
CUDA_CHECK(hipMalloc(&dinfo, sizeof(cusolver_int_t)));
// copy input data
CUDA_CHECK(hipMemcpy2D(dA, ldda * sizeof(T), hA, lda * sizeof(T), N * sizeof(T), N,
hipMemcpyDefault));
CUDA_CHECK(hipMemcpy2D(dB, lddb * sizeof(T), hB, ldb * sizeof(T), N * sizeof(T), nrhs,
hipMemcpyDefault));
// get required device work buffer size
CUSOLVER_CHECK(cusolverDnIRSXgesv_bufferSize(handle, gesv_params, N, nrhs, &dwork_size));
std::cout << "Workspace is " << dwork_size << " bytes" << std::endl;
CUDA_CHECK(hipMalloc(&dwork, dwork_size));
std::cout << "Solving matrix on device..." << std::endl;
CUDA_CHECK(hipEventRecord(event_start, stream));
cusolverStatus_t gesv_status =
cusolverDnIRSXgesv(handle, gesv_params, gesv_info, N, nrhs, dA, ldda, dB, lddb, dX, lddx,
dwork, dwork_size, &iter, dinfo);
CUSOLVER_CHECK(gesv_status);
CUDA_CHECK(hipEventRecord(event_end, stream));
// check solve status
int info = 0;
CUDA_CHECK(
hipMemcpyAsync(&info, dinfo, sizeof(cusolver_int_t), hipMemcpyDeviceToHost, stream));
CUDA_CHECK(hipStreamSynchronize(stream));
std::cout << "Solve info is: " << info << ", iter is: " << iter << std::endl;
CUDA_CHECK(hipMemcpy2D(hX, ldx * sizeof(T), dX, lddx * sizeof(T), N * sizeof(T), nrhs,
hipMemcpyDefault));
if (verbose) {
std::cout << "X:\n";
print_matrix(nrhs, N, hX, ldx);
}
CUDA_CHECK(hipGetLastError());
float solve_time = 0.f;
CUDA_CHECK(hipEventElapsedTime(&solve_time, event_start, event_end));
std::cout << "Solved matrix " << N << "x" << N << " with " << nrhs << " right hand sides in "
<< solve_time << "ms" << std::endl;
std::cout << "Releasing resources..." << std::endl;
CUDA_CHECK(hipFree(dwork));
CUDA_CHECK(hipFree(dinfo));
CUDA_CHECK(hipFree(dipiv));
CUDA_CHECK(hipFree(dX));
CUDA_CHECK(hipFree(dB));
CUDA_CHECK(hipFree(dA));
free(hA);
free(hB);
free(hX);
CUSOLVER_CHECK(hipsolverDnDestroy(handle));
CUDA_CHECK(hipEventDestroy(event_start));
CUDA_CHECK(hipEventDestroy(event_end));
CUDA_CHECK(hipStreamDestroy(stream));
std::cout << "Done!" << std::endl;
return 0;
}
|
0c3c1b15d7c0a37c6342e9045b5b10661ba4b547.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cassert>
#include <cstring>
#include <iostream>
#include <string>
#include <cuda_runtime.h>
#include <cusolverDn.h>
#include "cusolver_utils.h"
int main(int argc, const char *argv[]) {
bool verbose = false;
// Matrix size
const int N = 1024;
// Number of right hand sides
const int nrhs = 1;
// Use double precision matrix and half precision factorization
typedef double T;
const cusolverPrecType_t matrix_precision = CUSOLVER_R_64F;
// make sure that you specify matrix precision that matches to the data type
assert(traits<T>::cusolver_precision_type == matrix_precision);
const cusolverPrecType_t compute_lower_precision = CUSOLVER_R_16F;
// Use GMRES refinement solver
const cusolverIRSRefinement_t refinement_solver = CUSOLVER_IRS_REFINE_GMRES;
T *hA;
cusolver_int_t lda;
T *hB;
cusolver_int_t ldb;
T *hX;
cusolver_int_t ldx;
cudaStream_t stream;
cudaEvent_t event_start, event_end;
cusolverDnHandle_t handle;
cusolverDnIRSParams_t gesv_params;
cusolverDnIRSInfos_t gesv_info;
std::cout << "Generating matrix A on host..." << std::endl;
generate_random_matrix<T>(N, N, &hA, &lda);
std::cout << "make A diagonal dominant..." << std::endl;
make_diag_dominant_matrix<T>(N, N, hA, lda);
std::cout << "Generating matrix B on host..." << std::endl;
generate_random_matrix<T>(nrhs, N, &hB, &ldb);
std::cout << "Generating matrix X on host..." << std::endl;
generate_random_matrix<T>(nrhs, N, &hX, &ldx);
if (verbose) {
std::cout << "A: \n";
print_matrix(N, N, hA, lda);
std::cout << "B: \n";
print_matrix(nrhs, N, hB, ldb);
std::cout << "X: \n";
print_matrix(nrhs, N, hX, ldx);
}
std::cout << "Initializing CUDA..." << std::endl;
CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
CUDA_CHECK(cudaEventCreate(&event_start));
CUDA_CHECK(cudaEventCreate(&event_end));
CUSOLVER_CHECK(cusolverDnCreate(&handle));
CUSOLVER_CHECK(cusolverDnSetStream(handle, stream));
std::cout << "Setting up gesv() parameters..." << std::endl;
// create solver parameters
CUSOLVER_CHECK(cusolverDnIRSParamsCreate(&gesv_params));
// set matrix precision and factorization precision
CUSOLVER_CHECK(cusolverDnIRSParamsSetSolverPrecisions(gesv_params, matrix_precision,
compute_lower_precision));
// set refinement solver
CUSOLVER_CHECK(cusolverDnIRSParamsSetRefinementSolver(gesv_params, refinement_solver));
// create solve info structure
CUSOLVER_CHECK(cusolverDnIRSInfosCreate(&gesv_info));
// matrix on device
T *dA;
cusolver_int_t ldda = ALIGN_TO(N * sizeof(T), device_alignment) / sizeof(T);
// right hand side on device
T *dB;
cusolver_int_t lddb = ALIGN_TO(N * sizeof(T), device_alignment) / sizeof(T);
// solution on device
T *dX;
cusolver_int_t lddx = ALIGN_TO(N * sizeof(T), device_alignment) / sizeof(T);
// pivot sequence on device
cusolver_int_t *dipiv;
// info indicator on device
cusolver_int_t *dinfo;
// work buffer
void *dwork;
// size of work buffer
size_t dwork_size;
// number of refinement iterations returned by solver
cusolver_int_t iter;
std::cout << "Allocating memory on device..." << std::endl;
// allocate data
CUDA_CHECK(cudaMalloc(&dA, ldda * N * sizeof(T)));
CUDA_CHECK(cudaMalloc(&dB, lddb * nrhs * sizeof(T)));
CUDA_CHECK(cudaMalloc(&dX, lddx * nrhs * sizeof(T)));
CUDA_CHECK(cudaMalloc(&dipiv, N * sizeof(cusolver_int_t)));
CUDA_CHECK(cudaMalloc(&dinfo, sizeof(cusolver_int_t)));
// copy input data
CUDA_CHECK(cudaMemcpy2D(dA, ldda * sizeof(T), hA, lda * sizeof(T), N * sizeof(T), N,
cudaMemcpyDefault));
CUDA_CHECK(cudaMemcpy2D(dB, lddb * sizeof(T), hB, ldb * sizeof(T), N * sizeof(T), nrhs,
cudaMemcpyDefault));
// get required device work buffer size
CUSOLVER_CHECK(cusolverDnIRSXgesv_bufferSize(handle, gesv_params, N, nrhs, &dwork_size));
std::cout << "Workspace is " << dwork_size << " bytes" << std::endl;
CUDA_CHECK(cudaMalloc(&dwork, dwork_size));
std::cout << "Solving matrix on device..." << std::endl;
CUDA_CHECK(cudaEventRecord(event_start, stream));
cusolverStatus_t gesv_status =
cusolverDnIRSXgesv(handle, gesv_params, gesv_info, N, nrhs, dA, ldda, dB, lddb, dX, lddx,
dwork, dwork_size, &iter, dinfo);
CUSOLVER_CHECK(gesv_status);
CUDA_CHECK(cudaEventRecord(event_end, stream));
// check solve status
int info = 0;
CUDA_CHECK(
cudaMemcpyAsync(&info, dinfo, sizeof(cusolver_int_t), cudaMemcpyDeviceToHost, stream));
CUDA_CHECK(cudaStreamSynchronize(stream));
std::cout << "Solve info is: " << info << ", iter is: " << iter << std::endl;
CUDA_CHECK(cudaMemcpy2D(hX, ldx * sizeof(T), dX, lddx * sizeof(T), N * sizeof(T), nrhs,
cudaMemcpyDefault));
if (verbose) {
std::cout << "X:\n";
print_matrix(nrhs, N, hX, ldx);
}
CUDA_CHECK(cudaGetLastError());
float solve_time = 0.f;
CUDA_CHECK(cudaEventElapsedTime(&solve_time, event_start, event_end));
std::cout << "Solved matrix " << N << "x" << N << " with " << nrhs << " right hand sides in "
<< solve_time << "ms" << std::endl;
std::cout << "Releasing resources..." << std::endl;
CUDA_CHECK(cudaFree(dwork));
CUDA_CHECK(cudaFree(dinfo));
CUDA_CHECK(cudaFree(dipiv));
CUDA_CHECK(cudaFree(dX));
CUDA_CHECK(cudaFree(dB));
CUDA_CHECK(cudaFree(dA));
free(hA);
free(hB);
free(hX);
CUSOLVER_CHECK(cusolverDnDestroy(handle));
CUDA_CHECK(cudaEventDestroy(event_start));
CUDA_CHECK(cudaEventDestroy(event_end));
CUDA_CHECK(cudaStreamDestroy(stream));
std::cout << "Done!" << std::endl;
return 0;
}
|
1bf8a6cc6f71ed5818a8232dc2e797f5decdbab5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "unsafe.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *shared_var = NULL;
hipMalloc(&shared_var, XSIZE*YSIZE);
int *values_read = NULL;
hipMalloc(&values_read, XSIZE*YSIZE);
int N = XSIZE*YSIZE;
int iters = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(unsafe, dim3(gridBlock), dim3(threadBlock), 0, 0, shared_var, values_read, N, iters);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
    hipLaunchKernelGGL(unsafe, dim3(gridBlock), dim3(threadBlock), 0, 0, shared_var, values_read, N, iters);
}
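// Note: kernel launches are asynchronous and no hipDeviceSynchronize() is
// issued before the end timestamp below, so the measured interval covers
// enqueueing the 1000 launches (plus whatever execution overlaps it), not
// necessarily the kernels' full execution time.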
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    hipLaunchKernelGGL(unsafe, dim3(gridBlock), dim3(threadBlock), 0, 0, shared_var, values_read, N, iters);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
1bf8a6cc6f71ed5818a8232dc2e797f5decdbab5.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "unsafe.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *shared_var = NULL;
cudaMalloc(&shared_var, XSIZE*YSIZE);
int *values_read = NULL;
cudaMalloc(&values_read, XSIZE*YSIZE);
int N = XSIZE*YSIZE;
int iters = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
unsafe<<<gridBlock,threadBlock>>>(shared_var,values_read,N,iters);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
unsafe<<<gridBlock,threadBlock>>>(shared_var,values_read,N,iters);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
unsafe<<<gridBlock,threadBlock>>>(shared_var,values_read,N,iters);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
ce71a4d1977d6e06c46b3f743152720e988ba5da.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "tuple.h"
extern "C" {
__global__
void test(char *string){
printf("string=%s\n",string);
}
}
|
ce71a4d1977d6e06c46b3f743152720e988ba5da.cu
|
#include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include <sys/time.h>
#include "tuple.h"
extern "C" {
__global__
void test(char *string){
printf("string=%s\n",string);
}
}
|
functions.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <bits/stdc++.h>
#include <cmath>
#include "../include/common.h"
#include "../include/functions.cuh"
void convolutionOnHost(unsigned char *dst, unsigned char *src, float *kernel,
int kernelSide, const int width, const int height,
const int channels) {
unsigned int margin = int((kernelSide - 1) / 2);
// Loop through each pixel.
for (int y = margin; y < width - margin; y++) {
for (int x = margin; x < height - margin; x++) {
// Loop through each element of the kernel.
for (int dy = 0; dy < kernelSide; dy++) {
for (int dx = 0; dx < kernelSide; dx++) {
// Loop through the channels of the image.
for (int c = 0; c < channels; c++) {
int src_i = channels * ((x + (dx - margin)) * width +
(y + (dy - margin))) +
c;
int ker_i = dx * kernelSide + dy;
int dst_i = channels * (x * width + y) + c;
// Reset dst element at the start of the conv.
if (ker_i == 0) {
dst[dst_i] = 0;
}
// Add result of multiplication.
dst[dst_i] += int(src[src_i] * kernel[ker_i]);
}
}
}
}
}
}
__global__ void convolutionOnDevice(unsigned char *dst, unsigned char *src,
float *kernel, int kernelSide,
const int width, const int height,
const int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height) {
return;
}
unsigned int margin = int((kernelSide - 1) / 2);
int x = (int)i / width;
int y = (i % width);
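    // i enumerates pixels in row-major order: x is the row (0..height-1) and
    // y the column (0..width-1), matching the host loops above.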
// Check for minimum padding.
if (y < margin or y > width - margin - 1 or x < margin or
x > height - margin - 1) {
return;
}
// Loop through each element of the kernel.
for (int dy = 0; dy < kernelSide; dy++) {
for (int dx = 0; dx < kernelSide; dx++) {
// Loop through the channels of the image.
for (int c = 0; c < channels; c++) {
int src_i = channels * ((x + (dx - margin)) * width +
(y + (dy - margin))) +
c;
int ker_i = dx * kernelSide + dy;
int dst_i = channels * i + c;
// Reset dst element at the start of the conv.
if (ker_i == 0) {
dst[dst_i] = 0;
}
// Add result of multiplication.
dst[dst_i] += int(src[src_i] * kernel[ker_i]);
}
}
}
}
void drawLineOnHost(unsigned char *data, int x1, int y1, int x2, int y2,
int radius, int *color, int colorSize, int width,
int height, int channels) {
for (int dy = min(y1, y2); dy < max(y1, y2); dy++) {
for (int dx = min(x1, x2); dx < max(x1, x2); dx++) {
int interpolatedY = (y1 * (x2 - dx) + y2 * (dx - x1)) / (x2 - x1);
if (interpolatedY - radius > dy or interpolatedY + radius < dy) {
continue;
}
int index = (dx * width + dy) * channels;
for (int c = 0; c < min(channels, colorSize); c++) {
if (index + c < width * height * channels) {
data[index + c] = color[c];
}
}
}
}
}
__global__ void drawLineOnDevice(unsigned char *data, int x1, int y1, int x2,
int y2, int radius, int *color, int colorSize,
int width, int height, int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height) {
return;
}
int dx = (int)i / width;
int dy = (i % width);
// Check for boundaries.
int interpolatedY = (y1 * (x2 - dx) + y2 * (dx - x1)) / (x2 - x1);
if (dx < min(x1, x2) or dx >= max(x1, x2) or dy < min(y1, y2) or
dy >= max(y1, y2) or interpolatedY - radius > dy or
interpolatedY + radius < dy) {
return;
}
for (int c = 0; c < min(channels, colorSize); c++) {
int index = channels * i;
if (index + c < width * height * channels) {
data[index + c] = color[c];
}
}
}
void drawPointOnHost(unsigned char *data, int x, int y, int radius, int *color,
int colorSize, int width, int height, int channels) {
for (int dy = max(0, y - radius); dy < y + radius; dy++) {
for (int dx = max(0, x - radius); dx < x + radius; dx++) {
int index = (dx * width + dy) * channels;
for (int c = 0; c < min(channels, colorSize); c++) {
if (index + c < width * height * channels) {
data[index + c] = color[c];
}
}
}
}
}
__global__ void drawPointOnDevice(unsigned char *data, int x, int y, int radius,
int *color, int colorSize, int width,
int height, int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height) {
return;
}
int dx = (int)i / width;
int dy = (i % width);
// Check for point boundaries.
if (dy < y - radius or dy >= y + radius or dx < x - radius or
dx >= x + radius) {
return;
}
for (int c = 0; c < min(channels, colorSize); c++) {
int index = channels * i;
if (index + c < width * height * channels) {
data[index + c] = color[c];
}
}
}
void differenceOnHost(unsigned char *dst, unsigned char *src, const int width,
const int height, const int channels) {
for (int y = 0; y < width; y++) {
for (int x = 0; x < height; x++) {
for (int c = 0; c < channels; c++) {
int i = channels * (x * width + y) + c;
if (dst[i] > src[i]) {
dst[i] = dst[i] - src[i];
} else {
dst[i] = src[i] - dst[i];
}
}
}
}
}
__global__ void differenceOnDevice(unsigned char *dst, unsigned char *src,
const int width, const int height,
const int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height * channels) {
return;
}
if (dst[i] > src[i]) {
dst[i] = dst[i] - src[i];
} else {
dst[i] = src[i] - dst[i];
}
}
void cornerScoreOnHost(unsigned char *gradX, unsigned char *gradY, float *R,
int width, int height) {
const int windowSide = 3;
const int windowMargin = int((windowSide - 1) / 2);
for (int i = 0; i < width * height; i++) {
int x = (int)i / width;
int y = (i % width);
// Check for out-of-bound coordinates.
R[i] = 0;
if (x < windowMargin or y < windowMargin or
x > height - windowMargin - 1 or y > width - windowMargin - 1) {
continue;
}
// Create the windows Ix and Iy.
float *Ix = new float[windowSide * windowSide];
float *Iy = new float[windowSide * windowSide];
for (int wi = 0; wi < windowSide * windowSide; wi++) {
int dx = ((int)wi / windowSide) - windowMargin;
int dy = (wi % windowSide) - windowMargin;
int di = (x + dx) * width + (y + dy);
Ix[wi] = (float)gradX[di] / PIXEL_VALUES;
Iy[wi] = (float)gradY[di] / PIXEL_VALUES;
}
// Construct the structural matrix.
float *M = new float[4];
sumOfMatmulOnHost(&M[0], Ix, Ix, windowSide);
sumOfMatmulOnHost(&M[1], Ix, Iy, windowSide);
sumOfMatmulOnHost(&M[2], Iy, Ix, windowSide);
sumOfMatmulOnHost(&M[3], Iy, Iy, windowSide);
// Evaluate the pixel score.
float m = (M[0] + M[3]) / 2;
float p = (M[0] * M[3]) - (M[1] * M[2]);
float lambda1 = m + sqrt(m * m - p);
float lambda2 = m - sqrt(m * m - p);
R[i] = min(lambda1, lambda2);
// Free memory.
delete[] Ix;
delete[] Iy;
delete[] M;
}
}
__global__ void cornerScoreOnDevice(unsigned char *gradX, unsigned char *gradY,
float *R, int width, int height) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
const int windowSide = 3;
const int windowMargin = int((windowSide - 1) / 2);
// Check for overflow.
if (i >= width * height) {
return;
}
int x = (int)i / width;
int y = (i % width);
// Check for out-of-bound coordinates.
R[i] = 0;
if (x < windowMargin or y < windowMargin or x > height - windowMargin - 1 or
y > width - windowMargin - 1) {
return;
}
// Create the windows Ix and Iy.
float *Ix = new float[windowSide * windowSide];
float *Iy = new float[windowSide * windowSide];
for (int wi = 0; wi < windowSide * windowSide; wi++) {
int dx = ((int)wi / windowSide) - windowMargin;
int dy = (wi % windowSide) - windowMargin;
int di = (x + dx) * width + (y + dy);
Ix[wi] = (float)gradX[di] / PIXEL_VALUES;
Iy[wi] = (float)gradY[di] / PIXEL_VALUES;
}
// Construct the structural matrix.
float *M = new float[4]{0, 0, 0, 0};
sumOfMatmulOnDevice(&M[0], Ix, Ix, windowSide);
sumOfMatmulOnDevice(&M[1], Ix, Iy, windowSide);
sumOfMatmulOnDevice(&M[2], Iy, Ix, windowSide);
sumOfMatmulOnDevice(&M[3], Iy, Iy, windowSide);
// Evaluate the pixel score.
float m = (M[0] + M[3]) / 2;
float p = (M[0] * M[3]) - (M[1] * M[2]);
float lambda1 = m + sqrt(m * m - p);
float lambda2 = m - sqrt(m * m - p);
R[i] = min(lambda1, lambda2);
// Free memory.
delete[] Ix;
delete[] Iy;
delete[] M;
}
void opticalFLowOnHost(int *currentCorners, int *corners, int maxCorners,
unsigned char **currPyramidalScales,
unsigned char **prevPyramidalScales, int levels,
int width0, int height0) {
const int patchSide = 5;
const int windowSide = 9;
const int windowMargin = int((windowSide - 1) / 2);
unsigned char *prevPatch = new unsigned char[patchSide * patchSide];
unsigned char *currPatch = new unsigned char[patchSide * patchSide];
for (int l = levels - 1; l >= 0; l--) {
int width = width0 / pow(2, l);
int height = height0 / pow(2, l);
float minSse;
for (int i = 0; i < maxCorners; i++) {
// Downscale corner from the previous frame.
int lx = (corners[i] / width0) * pow(2, -l);
int ly = (corners[i] % width0) * pow(2, -l);
int prevCorner = int(lx * width + ly);
minSse = 100;
if (l == levels - 1) {
currentCorners[i] = prevCorner;
} else {
// Upscale corner from the previous layer.
int ux = int(currentCorners[i] / (width * 0.5)) * 2;
int uy = (currentCorners[i] % int((width * 0.5))) * 2;
currentCorners[i] = int(ux * width + uy);
}
extractPatchOnHost(prevPatch, prevPyramidalScales[l], prevCorner,
patchSide, width, height);
int x = (int)currentCorners[i] / width;
int y = currentCorners[i] % width;
for (int wi = 0; wi < windowSide * windowSide; wi++) {
int dx = ((int)wi / windowSide) - windowMargin;
int dy = (wi % windowSide) - windowMargin;
int di = (x + dx) * width + (y + dy);
extractPatchOnHost(currPatch, currPyramidalScales[l], di,
patchSide, width, height);
float sse = sumOfSquareDifferencesOnHost(prevPatch, currPatch,
patchSide);
if (sse < minSse) {
currentCorners[i] = di;
minSse = sse;
}
}
}
    }
    // Free memory.
    delete[] prevPatch;
    delete[] currPatch;
}
__global__ void opticalFLowOnDevice(int *currentCorners, int *corners,
int maxCorners,
unsigned char *currPyramidalScales,
unsigned char *prevPyramidalScales,
int levels, int offsetSize, int width0,
int height0) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Check for overflow.
    if (i >= maxCorners) {
        return;
    }
    const int patchSide = 5;
    const int windowSide = 9;
    const int windowMargin = int((windowSide - 1) / 2);
    unsigned char *prevPatch = new unsigned char[patchSide * patchSide];
    unsigned char *currPatch = new unsigned char[patchSide * patchSide];
for (int l = levels - 1; l >= 0; l--) {
int width = width0 / pow(2, l);
int height = height0 / pow(2, l);
float minSse = 100;
// Downscale corner from the previous frame.
int lx = (corners[i] / width0) * pow(2, -l);
int ly = (corners[i] % width0) * pow(2, -l);
int prevCorner = int(lx * width + ly);
if (l == levels - 1) {
currentCorners[i] = prevCorner;
} else {
// Upscale corner from the previous layer.
int ux = int(currentCorners[i] / (width * 0.5)) * 2;
int uy = (currentCorners[i] % int((width * 0.5))) * 2;
currentCorners[i] = int(ux * width + uy);
}
extractPatchOnDevice(prevPatch, prevPyramidalScales + l * offsetSize,
prevCorner, patchSide, width, height);
int x = (int)currentCorners[i] / width;
int y = currentCorners[i] % width;
for (int wi = 0; wi < windowSide * windowSide; wi++) {
int dx = ((int)wi / windowSide) - windowMargin;
int dy = (wi % windowSide) - windowMargin;
int di = (x + dx) * width + (y + dy);
extractPatchOnDevice(currPatch,
currPyramidalScales + l * offsetSize, di,
patchSide, width, height);
float sse =
sumOfSquareDifferencesOnDevice(prevPatch, currPatch, patchSide);
if (sse < minSse) {
currentCorners[i] = di;
minSse = sse;
}
}
}
delete[] prevPatch;
delete[] currPatch;
}
void rotateOnHost(unsigned char *dst, unsigned char *src, const double radian,
const int width, const int height, const int channels) {
for (int y = 0; y < width; y++) {
for (int x = 0; x < height; x++) {
// Evaluate the source pixels.
int x_center = x - round(height / 2.0);
int y_center = y - round(width / 2.0);
double xa = x_center * cos(-radian) - y_center * sin(-radian) +
round(height / 2.0);
double ya = x_center * sin(-radian) + y_center * cos(-radian) +
round(width / 2.0);
// Check for out-of-bound coordinates.
if (xa < 0 or xa > height or ya < 0 or ya > width) {
// Set pixels to black and exit
for (int c = 0; c < channels; c++) {
int ib = channels * (x * width + y) + c;
dst[ib] = 0;
}
continue;
}
for (int c = 0; c < channels; c++) {
int ib = channels * (x * width + y) + c;
// Evaluate the four pixels given xs and ys roundings.
int ia[4] = {
channels * (int(floor(xa)) * width + int(floor(ya))) + c,
channels * (int(floor(xa)) * width + int(ceil(ya))) + c,
channels * (int(ceil(xa)) * width + int(floor(ya))) + c,
channels * (int(ceil(xa)) * width + int(ceil(ya))) + c};
// Evaluate the average value of the destination pixel.
float sum = 0.0;
int count = 0;
for (int k = 0; k < 4; k++) {
                    if (0 <= ia[k] and ia[k] < width * height * channels) {
sum += src[ia[k]];
count++;
}
}
dst[ib] = int(sum / count);
}
}
}
}
__global__ void rotateOnDevice(unsigned char *dst, unsigned char *src,
const double radian, const int width,
const int height, const int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height) {
return;
}
int x = (int)i / width;
int y = (i % width);
// Evaluate the source pixels.
int x_center = x - round(height / 2.0);
int y_center = y - round(width / 2.0);
double xa =
x_center * cos(-radian) - y_center * sin(-radian) + round(height / 2.0);
double ya =
x_center * sin(-radian) + y_center * cos(-radian) + round(width / 2.0);
// Check for out-of-bound coordinates.
if (xa < 0 or xa > height or ya < 0 or ya > width) {
// Set pixels to black and exit
for (int c = 0; c < channels; c++) {
int ib = channels * (x * width + y) + c;
dst[ib] = 0;
}
return;
}
for (int c = 0; c < channels; c++) {
int ib = channels * (x * width + y) + c;
// Evaluate the four pixels given xs and ys roundings.
int ia[4] = {channels * (int(floor(xa)) * width + int(floor(ya))) + c,
channels * (int(floor(xa)) * width + int(ceil(ya))) + c,
channels * (int(ceil(xa)) * width + int(floor(ya))) + c,
channels * (int(ceil(xa)) * width + int(ceil(ya))) + c};
// Evaluate the average value of the destination pixel.
float sum = 0.0;
int count = 0;
for (int k = 0; k < 4; k++) {
            if (0 <= ia[k] and ia[k] < width * height * channels) {
sum += src[ia[k]];
count++;
}
}
dst[ib] = int(sum / count);
}
}
void scaleOnHost(unsigned char *dst, unsigned char *src, const double ratio,
const int width, const int height, const int channels) {
int newWidth = width * ratio;
int newHeight = height * ratio;
float inverseRatio = 1.0 / ratio;
for (int y = 0; y < newWidth; y++) {
for (int x = 0; x < newHeight; x++) {
for (int c = 0; c < channels; c++) {
int i = (x * newWidth + y) * channels + c;
float tempValue = 0.0;
for (int dy = -1; dy < 2; dy++) {
for (int dx = -1; dx < 2; dx++) {
int oldI = ((int(inverseRatio * x) + dx) * width +
(int(inverseRatio * y) + dy)) *
channels +
c;
float weight = 1 / (pow(2, 2 + abs(dx) + abs(dy)));
                        if (oldI < 0 or oldI >= width * height * channels) {
continue;
}
tempValue += weight * src[oldI];
}
}
dst[i] = tempValue;
}
}
}
}
__global__ void scaleOnDevice(unsigned char *dst, unsigned char *src,
const double ratio, const int width,
const int height, const int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int newWidth = width * ratio;
int newHeight = height * ratio;
float inverseRatio = 1.0 / ratio;
// Check for overflow.
    if (i >= newWidth * newHeight) {
return;
}
int x = (int)i / newWidth;
int y = (i % newWidth);
for (int c = 0; c < channels; c++) {
float tempValue = 0.0;
for (int dy = -1; dy < 2; dy++) {
for (int dx = -1; dx < 2; dx++) {
int src_i = ((int(inverseRatio * x) + dx) * width +
(int(inverseRatio * y) + dy)) *
channels +
c;
float weight = 1 / (pow(2, 2 + abs(dx) + abs(dy)));
                if (src_i < 0 or src_i >= width * height * channels) {
continue;
}
tempValue += weight * src[src_i];
}
}
dst[i * channels + c] = tempValue;
}
}
void translateOnHost(unsigned char *dst, unsigned char *src, int px, int py,
const int width, const int height, const int channels) {
for (int y = 0; y < width; y++) {
for (int x = 0; x < height; x++) {
// Evaluate the source pixels.
int xa = x - px;
int ya = y - py;
// Check for out-of-bound coordinates.
            if (xa < 0 or xa >= height or ya < 0 or ya >= width) {
// Set pixels to black and exit
for (int c = 0; c < channels; c++) {
int ib = channels * (x * width + y) + c;
dst[ib] = 0;
}
continue;
}
for (int c = 0; c < channels; c++) {
int ia = channels * (xa * width + ya) + c;
int ib = channels * (x * width + y) + c;
dst[ib] = src[ia];
}
}
}
}
__global__ void translateOnDevice(unsigned char *dst, unsigned char *src,
int px, int py, const int width,
const int height, const int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height) {
return;
}
int x = (int)i / width;
int y = (i % width);
// Evaluate the source pixels.
int xa = x - px;
int ya = y - py;
// Check for out-of-bound coordinates.
    if (xa < 0 or xa >= height or ya < 0 or ya >= width) {
// Set pixels to black and exit.
for (int c = 0; c < channels; c++) {
int ib = channels * (x * width + y) + c;
dst[ib] = 0;
}
return;
}
for (int c = 0; c < channels; c++) {
int ia = channels * (xa * width + ya) + c;
int ib = channels * (x * width + y) + c;
dst[ib] = src[ia];
}
}
void transposeOnHost(unsigned char *data, const int width, const int height,
const int channels) {
for (int y = 0; y < width; y++) {
for (int x = 0; x < height; x++) {
for (int c = 0; c < channels; c++) {
int ia = channels * (y * width + x) + c;
int ib = channels * (x * height + y) + c;
if (ia > ib) {
continue;
}
unsigned char temp = data[ib];
data[ib] = data[ia];
data[ia] = temp;
}
}
}
}
__global__ void transposeOnDevice(unsigned char *data, const int width,
const int height, const int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height) {
return;
}
for (int c = 0; c < channels; c++) {
int ia = channels * i + c;
int ib = channels * ((i % width) * height + ((int)i / width)) + c;
if (ia > ib) {
continue;
}
unsigned char temp = data[ib];
data[ib] = data[ia];
data[ia] = temp;
}
}
void sumOfMatmulOnHost(float *total, float *A, float *B, int side) {
float *C = new float[side * side];
*total = 0;
for (int i = 0; i < side * side; i++) {
int x = (int)i / side;
int y = (i % side);
C[i] = 0;
for (int d = 0; d < side; d++) {
int ia = x * side + d;
int ib = d * side + y;
C[i] += A[ia] * B[ib];
}
*total += C[i];
}
delete[] C;
}
__device__ void sumOfMatmulOnDevice(float *total, float *A, float *B,
int side) {
*total = 0;
for (int i = 0; i < side * side; i++) {
int x = (int)i / side;
int y = (i % side);
for (int d = 0; d < side; d++) {
int ia = x * side + d;
int ib = d * side + y;
*total += A[ia] * B[ib];
}
}
}
float sumOfSquareDifferencesOnHost(unsigned char *patch1, unsigned char *patch2,
int patchSide) {
float sse = 0.0;
for (int i = 0; i < patchSide * patchSide; i++) {
sse += pow(float(patch1[i] - patch2[i]), 2);
}
return sse;
}
__device__ float sumOfSquareDifferencesOnDevice(unsigned char *patch1,
unsigned char *patch2,
int patchSide) {
float sse = 0.0;
for (int i = 0; i < patchSide * patchSide; i++) {
sse += pow(float(patch1[i] - patch2[i]), 2);
}
return sse;
}
void extractPatchOnHost(unsigned char *patch, unsigned char *data,
int centerIndex, int patchSide, int width, int height) {
const int patchMargin = int((patchSide - 1) / 2);
for (int pi = 0; pi < patchSide * patchSide; pi++) {
int x = (int)centerIndex / width;
int y = centerIndex % width;
int dx = ((int)pi / patchSide) - patchMargin;
int dy = (pi % patchSide) - patchMargin;
int di = (x + dx) * width + (y + dy);
        if (di < 0 or di >= width * height) {
patch[pi] = 0;
} else {
patch[pi] = data[di];
}
}
}
__device__ void extractPatchOnDevice(unsigned char *patch, unsigned char *data,
int centerIndex, int patchSide, int width,
int height) {
const int patchMargin = int((patchSide - 1) / 2);
for (int pi = 0; pi < patchSide * patchSide; pi++) {
int x = (int)centerIndex / width;
int y = centerIndex % width;
int dx = ((int)pi / patchSide) - patchMargin;
int dy = (pi % patchSide) - patchMargin;
int di = (x + dx) * width + (y + dy);
        if (di < 0 or di >= width * height) {
patch[pi] = 0;
} else {
patch[pi] = data[di];
}
}
}
void findHomographyRANSACOnHost(float *matrices, float *scores, int maxIter,
int *currentCorners, int *previousCorners,
int maxCorners, int width, int height,
float thresholdError, float minConfidence) {
std::random_device rd;
std::mt19937 gen(rd());
    // Note: uniform_int_distribution is inclusive on both ends, so the upper
    // bound must be maxCorners - 1 to stay inside the corner arrays.
    std::uniform_int_distribution<> uniform(0, maxCorners - 1);
const int N_POINTS = 3;
const int SPACE_DIM = 2;
// Create maxIter models.
float *srcTriplet = new float[N_POINTS * SPACE_DIM];
float *dstTriplet = new float[N_POINTS * SPACE_DIM];
float *estPoint = new float[SPACE_DIM];
float *srcPoint = new float[SPACE_DIM];
float *dstPoint = new float[SPACE_DIM];
for (int n = 0; n < maxIter; n++) {
int offset = n * (N_POINTS * (SPACE_DIM + 1));
scores[n] = INFINITY;
// Select the minimum number of data points to estimate a model.
for (int k = 0; k < N_POINTS; k++) {
int i = uniform(gen);
srcTriplet[k * SPACE_DIM] = (int)previousCorners[i] / width;
srcTriplet[k * SPACE_DIM + 1] = previousCorners[i] % width;
dstTriplet[k * SPACE_DIM] = (int)currentCorners[i] / width;
dstTriplet[k * SPACE_DIM + 1] = currentCorners[i] % width;
}
// Estimate the model that fit the hypothetical inliers.
estimateTransformOnHost(matrices + offset, srcTriplet, dstTriplet);
// Count the points that fit the model and the total error.
int nInliers = 0;
float totalError = 0.0;
for (int i = 0; i < maxCorners; i++) {
srcPoint[0] = (int)previousCorners[i] / width;
srcPoint[1] = previousCorners[i] % width;
dstPoint[0] = (int)currentCorners[i] / width;
dstPoint[1] = currentCorners[i] % width;
// Apply the transform and evaluate the error.
applyTransformOnHost(estPoint, srcPoint, matrices + offset);
float reprojError = pow(int(estPoint[0] - dstPoint[0]), 2) +
pow(int(estPoint[1] - dstPoint[1]), 2);
nInliers += int(reprojError < thresholdError);
totalError += reprojError;
}
// Set the matrix score to the error if the confidence is high
// enough.
float confidence = (float)nInliers / maxCorners;
if (confidence >= minConfidence) {
scores[n] = totalError;
}
}
delete[] srcTriplet;
delete[] dstTriplet;
delete[] estPoint;
delete[] srcPoint;
delete[] dstPoint;
}
__global__ void findHomographyRANSACOnDevice(
float *matrices, float *scores, int maxIter, int *currentCorners,
int *previousCorners, int maxCorners, int *randomCornerIndices, int width,
int height, float thresholdError, float minConfidence) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= maxIter) {
return;
}
const int N_POINTS = 3;
const int SPACE_DIM = 2;
// Create maxIter models.
float *srcTriplet = new float[N_POINTS * SPACE_DIM];
float *dstTriplet = new float[N_POINTS * SPACE_DIM];
float *estPoint = new float[SPACE_DIM];
float *srcPoint = new float[SPACE_DIM];
float *dstPoint = new float[SPACE_DIM];
int offset = i * (N_POINTS * (SPACE_DIM + 1));
scores[i] = INFINITY;
// Select the minimum number of data points to estimate a model.
for (int k = 0; k < N_POINTS; k++) {
int index = randomCornerIndices[N_POINTS * i + k];
srcTriplet[k * SPACE_DIM] = (int)previousCorners[index] / width;
srcTriplet[k * SPACE_DIM + 1] = previousCorners[index] % width;
dstTriplet[k * SPACE_DIM] = (int)currentCorners[index] / width;
dstTriplet[k * SPACE_DIM + 1] = currentCorners[index] % width;
}
// Estimate the model that fit the hypothetical inliers.
estimateTransformOnDevice(matrices + offset, srcTriplet, dstTriplet);
// Count the points that fit the model and the total error.
int nInliers = 0;
float totalError = 0.0;
for (int index = 0; index < maxCorners; index++) {
srcPoint[0] = (int)previousCorners[index] / width;
srcPoint[1] = previousCorners[index] % width;
dstPoint[0] = (int)currentCorners[index] / width;
dstPoint[1] = currentCorners[index] % width;
// Apply the transform and evaluate the error.
applyTransformOnDevice(estPoint, srcPoint, matrices + offset);
float reprojError = pow(int(estPoint[0] - dstPoint[0]), 2) +
pow(int(estPoint[1] - dstPoint[1]), 2);
nInliers += int(reprojError < thresholdError);
totalError += reprojError;
}
// Set the matrix score to the error if the confidence is high
// enough.
float confidence = (float)nInliers / maxCorners;
if (confidence >= minConfidence) {
scores[i] = totalError;
}
delete[] srcTriplet;
delete[] dstTriplet;
delete[] estPoint;
delete[] srcPoint;
delete[] dstPoint;
}
void estimateTransformOnHost(float *A, float *Ui, float *vi) {
const int N_POINTS = 3;
const int SPACE_DIM = 2;
// Create X and Y matrices.
float *X = new float[N_POINTS * (SPACE_DIM + 1)];
float *Y = new float[N_POINTS * (SPACE_DIM + 1)];
for (int d = 0; d < SPACE_DIM + 1; d++) {
for (int n = 0; n < N_POINTS; n++) {
int i = d * (N_POINTS) + n;
int j = n * (SPACE_DIM) + d;
if (d == SPACE_DIM) {
X[i] = 1;
Y[i] = int(n >= N_POINTS - 1);
} else {
X[i] = Ui[j];
Y[i] = vi[j];
}
}
}
float *Xi = new float[N_POINTS * (SPACE_DIM + 1)];
invert3x3MatrixOnHost(Xi, X);
// Get the affine transformation matrix.
matmulOnHost(A, Y, Xi, N_POINTS);
delete[] X;
delete[] Y;
delete[] Xi;
}
__device__ void estimateTransformOnDevice(float *A, float *Ui, float *vi) {
const int N_POINTS = 3;
const int SPACE_DIM = 2;
// Create X and Y matrices.
float *X = new float[N_POINTS * (SPACE_DIM + 1)];
float *Y = new float[N_POINTS * (SPACE_DIM + 1)];
for (int d = 0; d < SPACE_DIM + 1; d++) {
for (int n = 0; n < N_POINTS; n++) {
int i = d * (N_POINTS) + n;
int j = n * (SPACE_DIM) + d;
if (d == SPACE_DIM) {
X[i] = 1;
Y[i] = int(n >= N_POINTS - 1);
} else {
X[i] = Ui[j];
Y[i] = vi[j];
}
}
}
float *Xi = new float[N_POINTS * (SPACE_DIM + 1)];
invert3x3MatrixOnDevice(Xi, X);
// Get the affine transformation matrix.
matmulOnDevice(A, Y, Xi, N_POINTS);
delete[] X;
delete[] Y;
delete[] Xi;
}
void invert3x3MatrixOnHost(float *Xi, float *X) {
float det = X[0] * (X[4] * X[8] - X[5] * X[7]) -
X[1] * (X[3] * X[8] - X[5] * X[6]) +
X[2] * (X[3] * X[7] - X[4] * X[6]);
Xi[0] = +float(X[4] * X[8] - X[5] * X[7]) / det;
Xi[1] = -float(X[1] * X[8] - X[2] * X[7]) / det;
Xi[2] = +float(X[1] * X[5] - X[2] * X[4]) / det;
Xi[3] = -float(X[3] * X[8] - X[5] * X[6]) / det;
Xi[4] = +float(X[0] * X[8] - X[2] * X[6]) / det;
Xi[5] = -float(X[0] * X[5] - X[2] * X[3]) / det;
Xi[6] = +float(X[3] * X[7] - X[4] * X[6]) / det;
Xi[7] = -float(X[0] * X[7] - X[1] * X[6]) / det;
Xi[8] = +float(X[0] * X[4] - X[1] * X[3]) / det;
}
__device__ void invert3x3MatrixOnDevice(float *Xi, float *X) {
float det = X[0] * (X[4] * X[8] - X[5] * X[7]) -
X[1] * (X[3] * X[8] - X[5] * X[6]) +
X[2] * (X[3] * X[7] - X[4] * X[6]);
Xi[0] = +float(X[4] * X[8] - X[5] * X[7]) / det;
Xi[1] = -float(X[1] * X[8] - X[2] * X[7]) / det;
Xi[2] = +float(X[1] * X[5] - X[2] * X[4]) / det;
Xi[3] = -float(X[3] * X[8] - X[5] * X[6]) / det;
Xi[4] = +float(X[0] * X[8] - X[2] * X[6]) / det;
Xi[5] = -float(X[0] * X[5] - X[2] * X[3]) / det;
Xi[6] = +float(X[3] * X[7] - X[4] * X[6]) / det;
Xi[7] = -float(X[0] * X[7] - X[1] * X[6]) / det;
Xi[8] = +float(X[0] * X[4] - X[1] * X[3]) / det;
}
void matmulOnHost(float *C, float *A, float *B, int side) {
for (int i = 0; i < side * side; i++) {
int x = (int)i / side;
int y = (i % side);
C[i] = 0;
for (int d = 0; d < side; d++) {
int ia = x * side + d;
int ib = d * side + y;
C[i] += A[ia] * B[ib];
}
}
}
__device__ void matmulOnDevice(float *C, float *A, float *B, int side) {
for (int i = 0; i < side * side; i++) {
int x = (int)i / side;
int y = (i % side);
C[i] = 0;
for (int d = 0; d < side; d++) {
int ia = x * side + d;
int ib = d * side + y;
C[i] += A[ia] * B[ib];
}
}
}
void applyTransformOnHost(float *dst, float *src, float *A) {
const int SPACE_DIM = 2;
for (int i = 0; i < SPACE_DIM; i++) {
dst[i] = 0.0;
dst[i] += src[0] * A[i * 3 + 0];
dst[i] += src[1] * A[i * 3 + 1];
}
}
__device__ void applyTransformOnDevice(float *dst, float *src, float *A) {
const int SPACE_DIM = 2;
for (int i = 0; i < SPACE_DIM; i++) {
dst[i] = 0.0;
dst[i] += src[0] * A[i * 3 + 0];
dst[i] += src[1] * A[i * 3 + 1];
}
}
|
functions.cu
|
#include <bits/stdc++.h>
#include <cmath>
#include "../include/common.h"
#include "../include/functions.cuh"
void convolutionOnHost(unsigned char *dst, unsigned char *src, float *kernel,
int kernelSide, const int width, const int height,
const int channels) {
unsigned int margin = int((kernelSide - 1) / 2);
// Loop through each pixel.
for (int y = margin; y < width - margin; y++) {
for (int x = margin; x < height - margin; x++) {
// Loop through each element of the kernel.
for (int dy = 0; dy < kernelSide; dy++) {
for (int dx = 0; dx < kernelSide; dx++) {
// Loop through the channels of the image.
for (int c = 0; c < channels; c++) {
int src_i = channels * ((x + (dx - margin)) * width +
(y + (dy - margin))) +
c;
int ker_i = dx * kernelSide + dy;
int dst_i = channels * (x * width + y) + c;
// Reset dst element at the start of the conv.
if (ker_i == 0) {
dst[dst_i] = 0;
}
// Add result of multiplication.
dst[dst_i] += int(src[src_i] * kernel[ker_i]);
}
}
}
}
}
}
__global__ void convolutionOnDevice(unsigned char *dst, unsigned char *src,
float *kernel, int kernelSide,
const int width, const int height,
const int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height) {
return;
}
unsigned int margin = int((kernelSide - 1) / 2);
int x = (int)i / width;
int y = (i % width);
// Check for minimum padding.
if (y < margin or y > width - margin - 1 or x < margin or
x > height - margin - 1) {
return;
}
// Loop through each element of the kernel.
for (int dy = 0; dy < kernelSide; dy++) {
for (int dx = 0; dx < kernelSide; dx++) {
// Loop through the channels of the image.
for (int c = 0; c < channels; c++) {
int src_i = channels * ((x + (dx - margin)) * width +
(y + (dy - margin))) +
c;
int ker_i = dx * kernelSide + dy;
int dst_i = channels * i + c;
// Reset dst element at the start of the conv.
if (ker_i == 0) {
dst[dst_i] = 0;
}
// Add result of multiplication.
dst[dst_i] += int(src[src_i] * kernel[ker_i]);
}
}
}
}
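// A minimal host-side launch sketch for convolutionOnDevice. This is an
// illustrative assumption, not code from this repository: the grayscale
// setting (channels == 1), the 3x3 averaging kernel, the 256-thread block
// size, and the name convolutionLaunchSketch are all hypothetical. It relies
// on the headers already included at the top of this file (std::vector) and
// on nvcc implicitly providing the CUDA runtime API.
void convolutionLaunchSketch(const unsigned char *hostSrc,
                             unsigned char *hostDst, int width, int height) {
    const int channels = 1;
    const int kernelSide = 3;
    std::vector<float> hostKernel(kernelSide * kernelSide, 1.0f / 9.0f);
    size_t imageBytes = size_t(width) * height * channels;
    unsigned char *devSrc, *devDst;
    float *devKernel;
    cudaMalloc((void **)&devSrc, imageBytes);
    cudaMalloc((void **)&devDst, imageBytes);
    cudaMalloc((void **)&devKernel, hostKernel.size() * sizeof(float));
    cudaMemcpy(devSrc, hostSrc, imageBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(devKernel, hostKernel.data(),
               hostKernel.size() * sizeof(float), cudaMemcpyHostToDevice);
    // One thread per pixel; the kernel discards out-of-range indices itself.
    int threads = 256;
    int blocks = (width * height + threads - 1) / threads;
    convolutionOnDevice<<<blocks, threads>>>(devDst, devSrc, devKernel,
                                             kernelSide, width, height,
                                             channels);
    cudaDeviceSynchronize();
    cudaMemcpy(hostDst, devDst, imageBytes, cudaMemcpyDeviceToHost);
    cudaFree(devSrc);
    cudaFree(devDst);
    cudaFree(devKernel);
}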
void drawLineOnHost(unsigned char *data, int x1, int y1, int x2, int y2,
int radius, int *color, int colorSize, int width,
int height, int channels) {
for (int dy = min(y1, y2); dy < max(y1, y2); dy++) {
for (int dx = min(x1, x2); dx < max(x1, x2); dx++) {
int interpolatedY = (y1 * (x2 - dx) + y2 * (dx - x1)) / (x2 - x1);
if (interpolatedY - radius > dy or interpolatedY + radius < dy) {
continue;
}
int index = (dx * width + dy) * channels;
for (int c = 0; c < min(channels, colorSize); c++) {
if (index + c < width * height * channels) {
data[index + c] = color[c];
}
}
}
}
}
__global__ void drawLineOnDevice(unsigned char *data, int x1, int y1, int x2,
int y2, int radius, int *color, int colorSize,
int width, int height, int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height) {
return;
}
int dx = (int)i / width;
int dy = (i % width);
// Check for boundaries.
int interpolatedY = (y1 * (x2 - dx) + y2 * (dx - x1)) / (x2 - x1);
if (dx < min(x1, x2) or dx >= max(x1, x2) or dy < min(y1, y2) or
dy >= max(y1, y2) or interpolatedY - radius > dy or
interpolatedY + radius < dy) {
return;
}
for (int c = 0; c < min(channels, colorSize); c++) {
int index = channels * i;
if (index + c < width * height * channels) {
data[index + c] = color[c];
}
}
}
void drawPointOnHost(unsigned char *data, int x, int y, int radius, int *color,
int colorSize, int width, int height, int channels) {
for (int dy = max(0, y - radius); dy < y + radius; dy++) {
for (int dx = max(0, x - radius); dx < x + radius; dx++) {
int index = (dx * width + dy) * channels;
for (int c = 0; c < min(channels, colorSize); c++) {
if (index + c < width * height * channels) {
data[index + c] = color[c];
}
}
}
}
}
__global__ void drawPointOnDevice(unsigned char *data, int x, int y, int radius,
int *color, int colorSize, int width,
int height, int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height) {
return;
}
int dx = (int)i / width;
int dy = (i % width);
// Check for point boundaries.
if (dy < y - radius or dy >= y + radius or dx < x - radius or
dx >= x + radius) {
return;
}
for (int c = 0; c < min(channels, colorSize); c++) {
int index = channels * i;
if (index + c < width * height * channels) {
data[index + c] = color[c];
}
}
}
void differenceOnHost(unsigned char *dst, unsigned char *src, const int width,
const int height, const int channels) {
for (int y = 0; y < width; y++) {
for (int x = 0; x < height; x++) {
for (int c = 0; c < channels; c++) {
int i = channels * (x * width + y) + c;
if (dst[i] > src[i]) {
dst[i] = dst[i] - src[i];
} else {
dst[i] = src[i] - dst[i];
}
}
}
}
}
__global__ void differenceOnDevice(unsigned char *dst, unsigned char *src,
const int width, const int height,
const int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height * channels) {
return;
}
if (dst[i] > src[i]) {
dst[i] = dst[i] - src[i];
} else {
dst[i] = src[i] - dst[i];
}
}
void cornerScoreOnHost(unsigned char *gradX, unsigned char *gradY, float *R,
int width, int height) {
const int windowSide = 3;
const int windowMargin = int((windowSide - 1) / 2);
for (int i = 0; i < width * height; i++) {
int x = (int)i / width;
int y = (i % width);
// Check for out-of-bound coordinates.
R[i] = 0;
if (x < windowMargin or y < windowMargin or
x > height - windowMargin - 1 or y > width - windowMargin - 1) {
continue;
}
// Create the windows Ix and Iy.
float *Ix = new float[windowSide * windowSide];
float *Iy = new float[windowSide * windowSide];
for (int wi = 0; wi < windowSide * windowSide; wi++) {
int dx = ((int)wi / windowSide) - windowMargin;
int dy = (wi % windowSide) - windowMargin;
int di = (x + dx) * width + (y + dy);
Ix[wi] = (float)gradX[di] / PIXEL_VALUES;
Iy[wi] = (float)gradY[di] / PIXEL_VALUES;
}
// Construct the structural matrix.
float *M = new float[4];
sumOfMatmulOnHost(&M[0], Ix, Ix, windowSide);
sumOfMatmulOnHost(&M[1], Ix, Iy, windowSide);
sumOfMatmulOnHost(&M[2], Iy, Ix, windowSide);
sumOfMatmulOnHost(&M[3], Iy, Iy, windowSide);
// Evaluate the pixel score.
float m = (M[0] + M[3]) / 2;
float p = (M[0] * M[3]) - (M[1] * M[2]);
float lambda1 = m + sqrt(m * m - p);
float lambda2 = m - sqrt(m * m - p);
R[i] = min(lambda1, lambda2);
// Free memory.
delete[] Ix;
delete[] Iy;
delete[] M;
}
}
__global__ void cornerScoreOnDevice(unsigned char *gradX, unsigned char *gradY,
float *R, int width, int height) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
const int windowSide = 3;
const int windowMargin = int((windowSide - 1) / 2);
// Check for overflow.
if (i >= width * height) {
return;
}
int x = (int)i / width;
int y = (i % width);
// Check for out-of-bound coordinates.
R[i] = 0;
if (x < windowMargin or y < windowMargin or x > height - windowMargin - 1 or
y > width - windowMargin - 1) {
return;
}
// Create the windows Ix and Iy.
float *Ix = new float[windowSide * windowSide];
float *Iy = new float[windowSide * windowSide];
for (int wi = 0; wi < windowSide * windowSide; wi++) {
int dx = ((int)wi / windowSide) - windowMargin;
int dy = (wi % windowSide) - windowMargin;
int di = (x + dx) * width + (y + dy);
Ix[wi] = (float)gradX[di] / PIXEL_VALUES;
Iy[wi] = (float)gradY[di] / PIXEL_VALUES;
}
// Construct the structural matrix.
float *M = new float[4]{0, 0, 0, 0};
sumOfMatmulOnDevice(&M[0], Ix, Ix, windowSide);
sumOfMatmulOnDevice(&M[1], Ix, Iy, windowSide);
sumOfMatmulOnDevice(&M[2], Iy, Ix, windowSide);
sumOfMatmulOnDevice(&M[3], Iy, Iy, windowSide);
// Evaluate the pixel score.
float m = (M[0] + M[3]) / 2;
float p = (M[0] * M[3]) - (M[1] * M[2]);
float lambda1 = m + sqrt(m * m - p);
float lambda2 = m - sqrt(m * m - p);
R[i] = min(lambda1, lambda2);
// Free memory.
delete[] Ix;
delete[] Iy;
delete[] M;
}
void opticalFLowOnHost(int *currentCorners, int *corners, int maxCorners,
unsigned char **currPyramidalScales,
unsigned char **prevPyramidalScales, int levels,
int width0, int height0) {
const int patchSide = 5;
const int windowSide = 9;
const int windowMargin = int((windowSide - 1) / 2);
unsigned char *prevPatch = new unsigned char[patchSide * patchSide];
unsigned char *currPatch = new unsigned char[patchSide * patchSide];
for (int l = levels - 1; l >= 0; l--) {
int width = width0 / pow(2, l);
int height = height0 / pow(2, l);
float minSse;
for (int i = 0; i < maxCorners; i++) {
// Downscale corner from the previous frame.
int lx = (corners[i] / width0) * pow(2, -l);
int ly = (corners[i] % width0) * pow(2, -l);
int prevCorner = int(lx * width + ly);
minSse = 100;
if (l == levels - 1) {
currentCorners[i] = prevCorner;
} else {
// Upscale corner from the previous layer.
int ux = int(currentCorners[i] / (width * 0.5)) * 2;
int uy = (currentCorners[i] % int((width * 0.5))) * 2;
currentCorners[i] = int(ux * width + uy);
}
extractPatchOnHost(prevPatch, prevPyramidalScales[l], prevCorner,
patchSide, width, height);
int x = (int)currentCorners[i] / width;
int y = currentCorners[i] % width;
for (int wi = 0; wi < windowSide * windowSide; wi++) {
int dx = ((int)wi / windowSide) - windowMargin;
int dy = (wi % windowSide) - windowMargin;
int di = (x + dx) * width + (y + dy);
extractPatchOnHost(currPatch, currPyramidalScales[l], di,
patchSide, width, height);
float sse = sumOfSquareDifferencesOnHost(prevPatch, currPatch,
patchSide);
if (sse < minSse) {
currentCorners[i] = di;
minSse = sse;
}
}
}
    }
    // Free memory.
    delete[] prevPatch;
    delete[] currPatch;
}
__global__ void opticalFLowOnDevice(int *currentCorners, int *corners,
int maxCorners,
unsigned char *currPyramidalScales,
unsigned char *prevPyramidalScales,
int levels, int offsetSize, int width0,
int height0) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Check for overflow.
    if (i >= maxCorners) {
        return;
    }
    const int patchSide = 5;
    const int windowSide = 9;
    const int windowMargin = int((windowSide - 1) / 2);
    unsigned char *prevPatch = new unsigned char[patchSide * patchSide];
    unsigned char *currPatch = new unsigned char[patchSide * patchSide];
for (int l = levels - 1; l >= 0; l--) {
int width = width0 / pow(2, l);
int height = height0 / pow(2, l);
float minSse = 100;
// Downscale corner from the previous frame.
int lx = (corners[i] / width0) * pow(2, -l);
int ly = (corners[i] % width0) * pow(2, -l);
int prevCorner = int(lx * width + ly);
if (l == levels - 1) {
currentCorners[i] = prevCorner;
} else {
// Upscale corner from the previous layer.
int ux = int(currentCorners[i] / (width * 0.5)) * 2;
int uy = (currentCorners[i] % int((width * 0.5))) * 2;
currentCorners[i] = int(ux * width + uy);
}
extractPatchOnDevice(prevPatch, prevPyramidalScales + l * offsetSize,
prevCorner, patchSide, width, height);
int x = (int)currentCorners[i] / width;
int y = currentCorners[i] % width;
for (int wi = 0; wi < windowSide * windowSide; wi++) {
int dx = ((int)wi / windowSide) - windowMargin;
int dy = (wi % windowSide) - windowMargin;
int di = (x + dx) * width + (y + dy);
extractPatchOnDevice(currPatch,
currPyramidalScales + l * offsetSize, di,
patchSide, width, height);
float sse =
sumOfSquareDifferencesOnDevice(prevPatch, currPatch, patchSide);
if (sse < minSse) {
currentCorners[i] = di;
minSse = sse;
}
}
}
delete[] prevPatch;
delete[] currPatch;
}
void rotateOnHost(unsigned char *dst, unsigned char *src, const double radian,
const int width, const int height, const int channels) {
for (int y = 0; y < width; y++) {
for (int x = 0; x < height; x++) {
// Evaluate the source pixels.
int x_center = x - round(height / 2.0);
int y_center = y - round(width / 2.0);
double xa = x_center * cos(-radian) - y_center * sin(-radian) +
round(height / 2.0);
double ya = x_center * sin(-radian) + y_center * cos(-radian) +
round(width / 2.0);
// Check for out-of-bound coordinates.
if (xa < 0 or xa > height or ya < 0 or ya > width) {
// Set pixels to black and exit
for (int c = 0; c < channels; c++) {
int ib = channels * (x * width + y) + c;
dst[ib] = 0;
}
continue;
}
for (int c = 0; c < channels; c++) {
int ib = channels * (x * width + y) + c;
// Evaluate the four pixels given xs and ys roundings.
int ia[4] = {
channels * (int(floor(xa)) * width + int(floor(ya))) + c,
channels * (int(floor(xa)) * width + int(ceil(ya))) + c,
channels * (int(ceil(xa)) * width + int(floor(ya))) + c,
channels * (int(ceil(xa)) * width + int(ceil(ya))) + c};
// Evaluate the average value of the destination pixel.
float sum = 0.0;
int count = 0;
for (int k = 0; k < 4; k++) {
                    if (0 <= ia[k] and ia[k] < width * height * channels) {
sum += src[ia[k]];
count++;
}
}
dst[ib] = int(sum / count);
}
}
}
}
__global__ void rotateOnDevice(unsigned char *dst, unsigned char *src,
const double radian, const int width,
const int height, const int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height) {
return;
}
int x = (int)i / width;
int y = (i % width);
// Evaluate the source pixels.
int x_center = x - round(height / 2.0);
int y_center = y - round(width / 2.0);
double xa =
x_center * cos(-radian) - y_center * sin(-radian) + round(height / 2.0);
double ya =
x_center * sin(-radian) + y_center * cos(-radian) + round(width / 2.0);
// Check for out-of-bound coordinates.
if (xa < 0 or xa > height or ya < 0 or ya > width) {
// Set pixels to black and exit
for (int c = 0; c < channels; c++) {
int ib = channels * (x * width + y) + c;
dst[ib] = 0;
}
return;
}
for (int c = 0; c < channels; c++) {
int ib = channels * (x * width + y) + c;
// Evaluate the four pixels given xs and ys roundings.
int ia[4] = {channels * (int(floor(xa)) * width + int(floor(ya))) + c,
channels * (int(floor(xa)) * width + int(ceil(ya))) + c,
channels * (int(ceil(xa)) * width + int(floor(ya))) + c,
channels * (int(ceil(xa)) * width + int(ceil(ya))) + c};
// Evaluate the average value of the destination pixel.
float sum = 0.0;
int count = 0;
for (int k = 0; k < 4; k++) {
            if (0 <= ia[k] and ia[k] < width * height * channels) {
sum += src[ia[k]];
count++;
}
}
dst[ib] = int(sum / count);
}
}
void scaleOnHost(unsigned char *dst, unsigned char *src, const double ratio,
const int width, const int height, const int channels) {
int newWidth = width * ratio;
int newHeight = height * ratio;
float inverseRatio = 1.0 / ratio;
for (int y = 0; y < newWidth; y++) {
for (int x = 0; x < newHeight; x++) {
for (int c = 0; c < channels; c++) {
int i = (x * newWidth + y) * channels + c;
float tempValue = 0.0;
for (int dy = -1; dy < 2; dy++) {
for (int dx = -1; dx < 2; dx++) {
int oldI = ((int(inverseRatio * x) + dx) * width +
(int(inverseRatio * y) + dy)) *
channels +
c;
float weight = 1 / (pow(2, 2 + abs(dx) + abs(dy)));
                        if (oldI < 0 or oldI >= width * height * channels) {
continue;
}
tempValue += weight * src[oldI];
}
}
dst[i] = tempValue;
}
}
}
}
__global__ void scaleOnDevice(unsigned char *dst, unsigned char *src,
const double ratio, const int width,
const int height, const int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int newWidth = width * ratio;
int newHeight = height * ratio;
float inverseRatio = 1.0 / ratio;
// Check for overflow.
    if (i >= newWidth * newHeight) {
return;
}
int x = (int)i / newWidth;
int y = (i % newWidth);
for (int c = 0; c < channels; c++) {
float tempValue = 0.0;
for (int dy = -1; dy < 2; dy++) {
for (int dx = -1; dx < 2; dx++) {
int src_i = ((int(inverseRatio * x) + dx) * width +
(int(inverseRatio * y) + dy)) *
channels +
c;
float weight = 1 / (pow(2, 2 + abs(dx) + abs(dy)));
                if (src_i < 0 or src_i >= width * height * channels) {
continue;
}
tempValue += weight * src[src_i];
}
}
dst[i * channels + c] = tempValue;
}
}
void translateOnHost(unsigned char *dst, unsigned char *src, int px, int py,
const int width, const int height, const int channels) {
for (int y = 0; y < width; y++) {
for (int x = 0; x < height; x++) {
// Evaluate the source pixels.
int xa = x - px;
int ya = y - py;
// Check for out-of-bound coordinates.
            if (xa < 0 or xa >= height or ya < 0 or ya >= width) {
// Set pixels to black and exit
for (int c = 0; c < channels; c++) {
int ib = channels * (x * width + y) + c;
dst[ib] = 0;
}
continue;
}
for (int c = 0; c < channels; c++) {
int ia = channels * (xa * width + ya) + c;
int ib = channels * (x * width + y) + c;
dst[ib] = src[ia];
}
}
}
}
__global__ void translateOnDevice(unsigned char *dst, unsigned char *src,
int px, int py, const int width,
const int height, const int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height) {
return;
}
int x = (int)i / width;
int y = (i % width);
// Evaluate the source pixels.
int xa = x - px;
int ya = y - py;
// Check for out-of-bound coordinates.
    if (xa < 0 or xa >= height or ya < 0 or ya >= width) {
// Set pixels to black and exit.
for (int c = 0; c < channels; c++) {
int ib = channels * (x * width + y) + c;
dst[ib] = 0;
}
return;
}
for (int c = 0; c < channels; c++) {
int ia = channels * (xa * width + ya) + c;
int ib = channels * (x * width + y) + c;
dst[ib] = src[ia];
}
}
void transposeOnHost(unsigned char *data, const int width, const int height,
const int channels) {
for (int y = 0; y < width; y++) {
for (int x = 0; x < height; x++) {
for (int c = 0; c < channels; c++) {
int ia = channels * (y * width + x) + c;
int ib = channels * (x * height + y) + c;
if (ia > ib) {
continue;
}
unsigned char temp = data[ib];
data[ib] = data[ia];
data[ia] = temp;
}
}
}
}
__global__ void transposeOnDevice(unsigned char *data, const int width,
const int height, const int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height) {
return;
}
for (int c = 0; c < channels; c++) {
int ia = channels * i + c;
int ib = channels * ((i % width) * height + ((int)i / width)) + c;
if (ia > ib) {
continue;
}
unsigned char temp = data[ib];
data[ib] = data[ia];
data[ia] = temp;
}
}
void sumOfMatmulOnHost(float *total, float *A, float *B, int side) {
float *C = new float[side * side];
*total = 0;
for (int i = 0; i < side * side; i++) {
int x = (int)i / side;
int y = (i % side);
C[i] = 0;
for (int d = 0; d < side; d++) {
int ia = x * side + d;
int ib = d * side + y;
C[i] += A[ia] * B[ib];
}
*total += C[i];
}
delete[] C;
}
__device__ void sumOfMatmulOnDevice(float *total, float *A, float *B,
int side) {
*total = 0;
for (int i = 0; i < side * side; i++) {
int x = (int)i / side;
int y = (i % side);
for (int d = 0; d < side; d++) {
int ia = x * side + d;
int ib = d * side + y;
*total += A[ia] * B[ib];
}
}
}
float sumOfSquareDifferencesOnHost(unsigned char *patch1, unsigned char *patch2,
int patchSide) {
float sse = 0.0;
for (int i = 0; i < patchSide * patchSide; i++) {
sse += pow(float(patch1[i] - patch2[i]), 2);
}
return sse;
}
__device__ float sumOfSquareDifferencesOnDevice(unsigned char *patch1,
unsigned char *patch2,
int patchSide) {
float sse = 0.0;
for (int i = 0; i < patchSide * patchSide; i++) {
sse += pow(float(patch1[i] - patch2[i]), 2);
}
return sse;
}
void extractPatchOnHost(unsigned char *patch, unsigned char *data,
int centerIndex, int patchSide, int width, int height) {
const int patchMargin = int((patchSide - 1) / 2);
for (int pi = 0; pi < patchSide * patchSide; pi++) {
int x = (int)centerIndex / width;
int y = centerIndex % width;
int dx = ((int)pi / patchSide) - patchMargin;
int dy = (pi % patchSide) - patchMargin;
int di = (x + dx) * width + (y + dy);
        if (di < 0 or di >= width * height) {
patch[pi] = 0;
} else {
patch[pi] = data[di];
}
}
}
__device__ void extractPatchOnDevice(unsigned char *patch, unsigned char *data,
int centerIndex, int patchSide, int width,
int height) {
const int patchMargin = int((patchSide - 1) / 2);
for (int pi = 0; pi < patchSide * patchSide; pi++) {
int x = (int)centerIndex / width;
int y = centerIndex % width;
int dx = ((int)pi / patchSide) - patchMargin;
int dy = (pi % patchSide) - patchMargin;
int di = (x + dx) * width + (y + dy);
        if (di < 0 or di >= width * height) {
patch[pi] = 0;
} else {
patch[pi] = data[di];
}
}
}
void findHomographyRANSACOnHost(float *matrices, float *scores, int maxIter,
int *currentCorners, int *previousCorners,
int maxCorners, int width, int height,
float thresholdError, float minConfidence) {
std::random_device rd;
std::mt19937 gen(rd());
    // Note: uniform_int_distribution is inclusive on both ends, so the upper
    // bound must be maxCorners - 1 to stay inside the corner arrays.
    std::uniform_int_distribution<> uniform(0, maxCorners - 1);
const int N_POINTS = 3;
const int SPACE_DIM = 2;
// Create maxIter models.
float *srcTriplet = new float[N_POINTS * SPACE_DIM];
float *dstTriplet = new float[N_POINTS * SPACE_DIM];
float *estPoint = new float[SPACE_DIM];
float *srcPoint = new float[SPACE_DIM];
float *dstPoint = new float[SPACE_DIM];
for (int n = 0; n < maxIter; n++) {
int offset = n * (N_POINTS * (SPACE_DIM + 1));
scores[n] = INFINITY;
// Select the minimum number of data points to estimate a model.
for (int k = 0; k < N_POINTS; k++) {
int i = uniform(gen);
srcTriplet[k * SPACE_DIM] = (int)previousCorners[i] / width;
srcTriplet[k * SPACE_DIM + 1] = previousCorners[i] % width;
dstTriplet[k * SPACE_DIM] = (int)currentCorners[i] / width;
dstTriplet[k * SPACE_DIM + 1] = currentCorners[i] % width;
}
// Estimate the model that fit the hypothetical inliers.
estimateTransformOnHost(matrices + offset, srcTriplet, dstTriplet);
// Count the points that fit the model and the total error.
int nInliers = 0;
float totalError = 0.0;
for (int i = 0; i < maxCorners; i++) {
srcPoint[0] = (int)previousCorners[i] / width;
srcPoint[1] = previousCorners[i] % width;
dstPoint[0] = (int)currentCorners[i] / width;
dstPoint[1] = currentCorners[i] % width;
// Apply the transform and evaluate the error.
applyTransformOnHost(estPoint, srcPoint, matrices + offset);
float reprojError = pow(int(estPoint[0] - dstPoint[0]), 2) +
pow(int(estPoint[1] - dstPoint[1]), 2);
nInliers += int(reprojError < thresholdError);
totalError += reprojError;
}
// Set the matrix score to the error if the confidence is high
// enough.
float confidence = (float)nInliers / maxCorners;
if (confidence >= minConfidence) {
scores[n] = totalError;
}
}
delete[] srcTriplet;
delete[] dstTriplet;
delete[] estPoint;
delete[] srcPoint;
delete[] dstPoint;
}
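// A sketch of how the RANSAC outputs above could be consumed: pick the
// iteration with the lowest score and copy out its 3x3 affine matrix
// (9 floats per iteration, matching the offset used above). The helper name
// selectBestHomography is a hypothetical addition, not part of this API.
int selectBestHomography(float *bestMatrix, float *matrices, float *scores,
                         int maxIter) {
    int best = -1;
    for (int n = 0; n < maxIter; n++) {
        // Models that never reached minConfidence keep score == INFINITY.
        if (best == -1 or scores[n] < scores[best]) {
            best = n;
        }
    }
    if (best >= 0) {
        for (int k = 0; k < 9; k++) {
            bestMatrix[k] = matrices[best * 9 + k];
        }
    }
    return best;
}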
__global__ void findHomographyRANSACOnDevice(
float *matrices, float *scores, int maxIter, int *currentCorners,
int *previousCorners, int maxCorners, int *randomCornerIndices, int width,
int height, float thresholdError, float minConfidence) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= maxIter) {
return;
}
const int N_POINTS = 3;
const int SPACE_DIM = 2;
// Create maxIter models.
float *srcTriplet = new float[N_POINTS * SPACE_DIM];
float *dstTriplet = new float[N_POINTS * SPACE_DIM];
float *estPoint = new float[SPACE_DIM];
float *srcPoint = new float[SPACE_DIM];
float *dstPoint = new float[SPACE_DIM];
int offset = i * (N_POINTS * (SPACE_DIM + 1));
scores[i] = INFINITY;
// Select the minimum number of data points to estimate a model.
for (int k = 0; k < N_POINTS; k++) {
int index = randomCornerIndices[N_POINTS * i + k];
srcTriplet[k * SPACE_DIM] = (int)previousCorners[index] / width;
srcTriplet[k * SPACE_DIM + 1] = previousCorners[index] % width;
dstTriplet[k * SPACE_DIM] = (int)currentCorners[index] / width;
dstTriplet[k * SPACE_DIM + 1] = currentCorners[index] % width;
}
// Estimate the model that fit the hypothetical inliers.
estimateTransformOnDevice(matrices + offset, srcTriplet, dstTriplet);
// Count the points that fit the model and the total error.
int nInliers = 0;
float totalError = 0.0;
for (int index = 0; index < maxCorners; index++) {
srcPoint[0] = (int)previousCorners[index] / width;
srcPoint[1] = previousCorners[index] % width;
dstPoint[0] = (int)currentCorners[index] / width;
dstPoint[1] = currentCorners[index] % width;
// Apply the transform and evaluate the error.
applyTransformOnDevice(estPoint, srcPoint, matrices + offset);
float reprojError = pow(int(estPoint[0] - dstPoint[0]), 2) +
pow(int(estPoint[1] - dstPoint[1]), 2);
nInliers += int(reprojError < thresholdError);
totalError += reprojError;
}
// Set the matrix score to the error if the confidence is high
// enough.
float confidence = (float)nInliers / maxCorners;
if (confidence >= minConfidence) {
scores[i] = totalError;
}
delete[] srcTriplet;
delete[] dstTriplet;
delete[] estPoint;
delete[] srcPoint;
delete[] dstPoint;
}
void estimateTransformOnHost(float *A, float *Ui, float *vi) {
const int N_POINTS = 3;
const int SPACE_DIM = 2;
// Create X and Y matrices.
float *X = new float[N_POINTS * (SPACE_DIM + 1)];
float *Y = new float[N_POINTS * (SPACE_DIM + 1)];
for (int d = 0; d < SPACE_DIM + 1; d++) {
for (int n = 0; n < N_POINTS; n++) {
int i = d * (N_POINTS) + n;
int j = n * (SPACE_DIM) + d;
if (d == SPACE_DIM) {
X[i] = 1;
Y[i] = int(n >= N_POINTS - 1);
} else {
X[i] = Ui[j];
Y[i] = vi[j];
}
}
}
float *Xi = new float[N_POINTS * (SPACE_DIM + 1)];
invert3x3MatrixOnHost(Xi, X);
// Get the affine transformation matrix.
matmulOnHost(A, Y, Xi, N_POINTS);
delete[] X;
delete[] Y;
delete[] Xi;
}
__device__ void estimateTransformOnDevice(float *A, float *Ui, float *vi) {
const int N_POINTS = 3;
const int SPACE_DIM = 2;
// Create X and Y matrices.
float *X = new float[N_POINTS * (SPACE_DIM + 1)];
float *Y = new float[N_POINTS * (SPACE_DIM + 1)];
for (int d = 0; d < SPACE_DIM + 1; d++) {
for (int n = 0; n < N_POINTS; n++) {
int i = d * (N_POINTS) + n;
int j = n * (SPACE_DIM) + d;
if (d == SPACE_DIM) {
X[i] = 1;
Y[i] = int(n >= N_POINTS - 1);
} else {
X[i] = Ui[j];
Y[i] = vi[j];
}
}
}
float *Xi = new float[N_POINTS * (SPACE_DIM + 1)];
invert3x3MatrixOnDevice(Xi, X);
// Get the affine transformation matrix.
matmulOnDevice(A, Y, Xi, N_POINTS);
delete[] X;
delete[] Y;
delete[] Xi;
}
void invert3x3MatrixOnHost(float *Xi, float *X) {
float det = X[0] * (X[4] * X[8] - X[5] * X[7]) -
X[1] * (X[3] * X[8] - X[5] * X[6]) +
X[2] * (X[3] * X[7] - X[4] * X[6]);
Xi[0] = +float(X[4] * X[8] - X[5] * X[7]) / det;
Xi[1] = -float(X[1] * X[8] - X[2] * X[7]) / det;
Xi[2] = +float(X[1] * X[5] - X[2] * X[4]) / det;
Xi[3] = -float(X[3] * X[8] - X[5] * X[6]) / det;
Xi[4] = +float(X[0] * X[8] - X[2] * X[6]) / det;
Xi[5] = -float(X[0] * X[5] - X[2] * X[3]) / det;
Xi[6] = +float(X[3] * X[7] - X[4] * X[6]) / det;
Xi[7] = -float(X[0] * X[7] - X[1] * X[6]) / det;
Xi[8] = +float(X[0] * X[4] - X[1] * X[3]) / det;
}
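// A small self-check sketch for the adjugate-based inverse above: X * inv(X)
// should reproduce the 3x3 identity within a tolerance. The helper name
// checkInvert3x3OnHost and the tolerance value are illustrative assumptions;
// it relies on the matmulOnHost declaration from functions.cuh.
bool checkInvert3x3OnHost(float *X, float tolerance = 1e-3f) {
    float Xi[9];
    float P[9];
    invert3x3MatrixOnHost(Xi, X);
    matmulOnHost(P, X, Xi, 3);
    for (int i = 0; i < 9; i++) {
        float expected = (i % 4 == 0) ? 1.0f : 0.0f;  // 1 on the diagonal.
        if (fabsf(P[i] - expected) > tolerance) {
            return false;
        }
    }
    return true;
}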
__device__ void invert3x3MatrixOnDevice(float *Xi, float *X) {
float det = X[0] * (X[4] * X[8] - X[5] * X[7]) -
X[1] * (X[3] * X[8] - X[5] * X[6]) +
X[2] * (X[3] * X[7] - X[4] * X[6]);
Xi[0] = +float(X[4] * X[8] - X[5] * X[7]) / det;
Xi[1] = -float(X[1] * X[8] - X[2] * X[7]) / det;
Xi[2] = +float(X[1] * X[5] - X[2] * X[4]) / det;
Xi[3] = -float(X[3] * X[8] - X[5] * X[6]) / det;
Xi[4] = +float(X[0] * X[8] - X[2] * X[6]) / det;
Xi[5] = -float(X[0] * X[5] - X[2] * X[3]) / det;
Xi[6] = +float(X[3] * X[7] - X[4] * X[6]) / det;
Xi[7] = -float(X[0] * X[7] - X[1] * X[6]) / det;
Xi[8] = +float(X[0] * X[4] - X[1] * X[3]) / det;
}
void matmulOnHost(float *C, float *A, float *B, int side) {
for (int i = 0; i < side * side; i++) {
int x = (int)i / side;
int y = (i % side);
C[i] = 0;
for (int d = 0; d < side; d++) {
int ia = x * side + d;
int ib = d * side + y;
C[i] += A[ia] * B[ib];
}
}
}
__device__ void matmulOnDevice(float *C, float *A, float *B, int side) {
for (int i = 0; i < side * side; i++) {
int x = (int)i / side;
int y = (i % side);
C[i] = 0;
for (int d = 0; d < side; d++) {
int ia = x * side + d;
int ib = d * side + y;
C[i] += A[ia] * B[ib];
}
}
}
void applyTransformOnHost(float *dst, float *src, float *A) {
const int SPACE_DIM = 2;
for (int i = 0; i < SPACE_DIM; i++) {
dst[i] = 0.0;
dst[i] += src[0] * A[i * 3 + 0];
dst[i] += src[1] * A[i * 3 + 1];
}
}
__device__ void applyTransformOnDevice(float *dst, float *src, float *A) {
const int SPACE_DIM = 2;
for (int i = 0; i < SPACE_DIM; i++) {
dst[i] = 0.0;
dst[i] += src[0] * A[i * 3 + 0];
dst[i] += src[1] * A[i * 3 + 1];
}
}
|
functions.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <bits/stdc++.h>
#include <cmath>
#include "../include/common.h"
#include "../include/functions.cuh"
void convolutionOnHost(unsigned char *dst, unsigned char *src, float *kernel,
int kernelSide, const int width, const int height,
const int channels) {
unsigned int margin = int((kernelSide - 1) / 2);
// Loop through each pixel.
for (int y = margin; y < width - margin; y++) {
for (int x = margin; x < height - margin; x++) {
// Loop through each element of the kernel.
for (int dy = 0; dy < kernelSide; dy++) {
for (int dx = 0; dx < kernelSide; dx++) {
// Loop through the channels of the image.
for (int c = 0; c < channels; c++) {
int src_i = channels * ((x + (dx - margin)) * width +
(y + (dy - margin))) +
c;
int ker_i = dx * kernelSide + dy;
int dst_i = channels * (x * width + y) + c;
// Reset dst element at the start of the conv.
if (ker_i == 0) {
dst[dst_i] = 0;
}
// Add result of multiplication.
dst[dst_i] += int(src[src_i] * kernel[ker_i]);
}
}
}
}
}
}
__global__ void convolutionOnDevice(unsigned char *dst, unsigned char *src,
float *kernel, int kernelSide,
const int width, const int height,
const int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height) {
return;
}
unsigned int margin = int((kernelSide - 1) / 2);
int x = (int)i / width;
int y = (i % width);
// Check for minimum padding.
if (y < margin or y > width - margin - 1 or x < margin or
x > height - margin - 1) {
return;
}
// Loop through each element of the kernel.
for (int dy = 0; dy < kernelSide; dy++) {
for (int dx = 0; dx < kernelSide; dx++) {
// Loop through the channels of the image.
for (int c = 0; c < channels; c++) {
int src_i = channels * ((x + (dx - margin)) * width +
(y + (dy - margin))) +
c;
int ker_i = dx * kernelSide + dy;
int dst_i = channels * i + c;
// Reset dst element at the start of the conv.
if (ker_i == 0) {
dst[dst_i] = 0;
}
// Add result of multiplication.
dst[dst_i] += int(src[src_i] * kernel[ker_i]);
}
}
}
}
void drawLineOnHost(unsigned char *data, int x1, int y1, int x2, int y2,
int radius, int *color, int colorSize, int width,
int height, int channels) {
for (int dy = min(y1, y2); dy < max(y1, y2); dy++) {
for (int dx = min(x1, x2); dx < max(x1, x2); dx++) {
int interpolatedY = (y1 * (x2 - dx) + y2 * (dx - x1)) / (x2 - x1);
if (interpolatedY - radius > dy or interpolatedY + radius < dy) {
continue;
}
int index = (dx * width + dy) * channels;
for (int c = 0; c < min(channels, colorSize); c++) {
if (index + c < width * height * channels) {
data[index + c] = color[c];
}
}
}
}
}
__global__ void drawLineOnDevice(unsigned char *data, int x1, int y1, int x2,
int y2, int radius, int *color, int colorSize,
int width, int height, int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height) {
return;
}
int dx = (int)i / width;
int dy = (i % width);
// Check for boundaries.
int interpolatedY = (y1 * (x2 - dx) + y2 * (dx - x1)) / (x2 - x1);
if (dx < min(x1, x2) or dx >= max(x1, x2) or dy < min(y1, y2) or
dy >= max(y1, y2) or interpolatedY - radius > dy or
interpolatedY + radius < dy) {
return;
}
for (int c = 0; c < min(channels, colorSize); c++) {
int index = channels * i;
if (index + c < width * height * channels) {
data[index + c] = color[c];
}
}
}
void drawPointOnHost(unsigned char *data, int x, int y, int radius, int *color,
int colorSize, int width, int height, int channels) {
for (int dy = max(0, y - radius); dy < y + radius; dy++) {
for (int dx = max(0, x - radius); dx < x + radius; dx++) {
int index = (dx * width + dy) * channels;
for (int c = 0; c < min(channels, colorSize); c++) {
if (index + c < width * height * channels) {
data[index + c] = color[c];
}
}
}
}
}
__global__ void drawPointOnDevice(unsigned char *data, int x, int y, int radius,
int *color, int colorSize, int width,
int height, int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height) {
return;
}
int dx = (int)i / width;
int dy = (i % width);
// Check for point boundaries.
if (dy < y - radius or dy >= y + radius or dx < x - radius or
dx >= x + radius) {
return;
}
for (int c = 0; c < min(channels, colorSize); c++) {
int index = channels * i;
if (index + c < width * height * channels) {
data[index + c] = color[c];
}
}
}
void differenceOnHost(unsigned char *dst, unsigned char *src, const int width,
const int height, const int channels) {
for (int y = 0; y < width; y++) {
for (int x = 0; x < height; x++) {
for (int c = 0; c < channels; c++) {
int i = channels * (x * width + y) + c;
if (dst[i] > src[i]) {
dst[i] = dst[i] - src[i];
} else {
dst[i] = src[i] - dst[i];
}
}
}
}
}
__global__ void differenceOnDevice(unsigned char *dst, unsigned char *src,
const int width, const int height,
const int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height * channels) {
return;
}
if (dst[i] > src[i]) {
dst[i] = dst[i] - src[i];
} else {
dst[i] = src[i] - dst[i];
}
}
void cornerScoreOnHost(unsigned char *gradX, unsigned char *gradY, float *R,
int width, int height) {
const int windowSide = 3;
const int windowMargin = int((windowSide - 1) / 2);
for (int i = 0; i < width * height; i++) {
int x = (int)i / width;
int y = (i % width);
// Check for out-of-bound coordinates.
R[i] = 0;
if (x < windowMargin or y < windowMargin or
x > height - windowMargin - 1 or y > width - windowMargin - 1) {
continue;
}
// Create the windows Ix and Iy.
float *Ix = new float[windowSide * windowSide];
float *Iy = new float[windowSide * windowSide];
for (int wi = 0; wi < windowSide * windowSide; wi++) {
int dx = ((int)wi / windowSide) - windowMargin;
int dy = (wi % windowSide) - windowMargin;
int di = (x + dx) * width + (y + dy);
Ix[wi] = (float)gradX[di] / PIXEL_VALUES;
Iy[wi] = (float)gradY[di] / PIXEL_VALUES;
}
// Construct the structural matrix.
float *M = new float[4];
sumOfMatmulOnHost(&M[0], Ix, Ix, windowSide);
sumOfMatmulOnHost(&M[1], Ix, Iy, windowSide);
sumOfMatmulOnHost(&M[2], Iy, Ix, windowSide);
sumOfMatmulOnHost(&M[3], Iy, Iy, windowSide);
// Evaluate the pixel score.
float m = (M[0] + M[3]) / 2;
float p = (M[0] * M[3]) - (M[1] * M[2]);
float lambda1 = m + sqrt(m * m - p);
float lambda2 = m - sqrt(m * m - p);
R[i] = min(lambda1, lambda2);
// Free memory.
delete[] Ix;
delete[] Iy;
delete[] M;
}
}
__global__ void cornerScoreOnDevice(unsigned char *gradX, unsigned char *gradY,
float *R, int width, int height) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
const int windowSide = 3;
const int windowMargin = int((windowSide - 1) / 2);
// Check for overflow.
if (i >= width * height) {
return;
}
int x = (int)i / width;
int y = (i % width);
// Check for out-of-bound coordinates.
R[i] = 0;
if (x < windowMargin or y < windowMargin or x > height - windowMargin - 1 or
y > width - windowMargin - 1) {
return;
}
// Create the windows Ix and Iy.
float *Ix = new float[windowSide * windowSide];
float *Iy = new float[windowSide * windowSide];
for (int wi = 0; wi < windowSide * windowSide; wi++) {
int dx = ((int)wi / windowSide) - windowMargin;
int dy = (wi % windowSide) - windowMargin;
int di = (x + dx) * width + (y + dy);
Ix[wi] = (float)gradX[di] / PIXEL_VALUES;
Iy[wi] = (float)gradY[di] / PIXEL_VALUES;
}
// Construct the structural matrix.
float *M = new float[4]{0, 0, 0, 0};
sumOfMatmulOnDevice(&M[0], Ix, Ix, windowSide);
sumOfMatmulOnDevice(&M[1], Ix, Iy, windowSide);
sumOfMatmulOnDevice(&M[2], Iy, Ix, windowSide);
sumOfMatmulOnDevice(&M[3], Iy, Iy, windowSide);
// Evaluate the pixel score.
float m = (M[0] + M[3]) / 2;
float p = (M[0] * M[3]) - (M[1] * M[2]);
float lambda1 = m + sqrt(m * m - p);
float lambda2 = m - sqrt(m * m - p);
R[i] = min(lambda1, lambda2);
// Free memory.
delete[] Ix;
delete[] Iy;
delete[] M;
}
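// Illustrative launch sketch (hypothetical buffer names): one thread per
// pixel, with gradX/gradY holding width * height gradient bytes and R a
// width * height float buffer on the device.
//
//   int threads = 256;
//   int blocks = (width * height + threads - 1) / threads;
//   cornerScoreOnDevice<<<blocks, threads>>>(d_gradX, d_gradY, d_R, width, height);
//
// Because each thread allocates its windows with device-side new, large
// images may require enlarging the device heap before the launch, e.g.
//   cudaDeviceSetLimit(cudaLimitMallocHeapSize, 64 * 1024 * 1024);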
void opticalFLowOnHost(int *currentCorners, int *corners, int maxCorners,
unsigned char **currPyramidalScales,
unsigned char **prevPyramidalScales, int levels,
int width0, int height0) {
const int patchSide = 5;
const int windowSide = 9;
const int windowMargin = int((windowSide - 1) / 2);
unsigned char *prevPatch = new unsigned char[patchSide * patchSide];
unsigned char *currPatch = new unsigned char[patchSide * patchSide];
for (int l = levels - 1; l >= 0; l--) {
int width = width0 / pow(2, l);
int height = height0 / pow(2, l);
float minSse;
for (int i = 0; i < maxCorners; i++) {
// Downscale corner from the previous frame.
int lx = (corners[i] / width0) * pow(2, -l);
int ly = (corners[i] % width0) * pow(2, -l);
int prevCorner = int(lx * width + ly);
minSse = 100;
if (l == levels - 1) {
currentCorners[i] = prevCorner;
} else {
// Upscale corner from the previous layer.
int ux = int(currentCorners[i] / (width * 0.5)) * 2;
int uy = (currentCorners[i] % int((width * 0.5))) * 2;
currentCorners[i] = int(ux * width + uy);
}
extractPatchOnHost(prevPatch, prevPyramidalScales[l], prevCorner,
patchSide, width, height);
int x = (int)currentCorners[i] / width;
int y = currentCorners[i] % width;
for (int wi = 0; wi < windowSide * windowSide; wi++) {
int dx = ((int)wi / windowSide) - windowMargin;
int dy = (wi % windowSide) - windowMargin;
int di = (x + dx) * width + (y + dy);
extractPatchOnHost(currPatch, currPyramidalScales[l], di,
patchSide, width, height);
float sse = sumOfSquareDifferencesOnHost(prevPatch, currPatch,
patchSide);
if (sse < minSse) {
currentCorners[i] = di;
minSse = sse;
}
}
}
}
    delete[] prevPatch;
    delete[] currPatch;
}
__global__ void opticalFLowOnDevice(int *currentCorners, int *corners,
int maxCorners,
unsigned char *currPyramidalScales,
unsigned char *prevPyramidalScales,
int levels, int offsetSize, int width0,
int height0) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Check for overflow.
    if (i >= maxCorners) {
        return;
    }
const int patchSide = 5;
const int windowSide = 9;
const int windowMargin = int((windowSide - 1) / 2);
unsigned char *prevPatch = new unsigned char[patchSide * patchSide];
unsigned char *currPatch = new unsigned char[patchSide * patchSide];
for (int l = levels - 1; l >= 0; l--) {
int width = width0 / pow(2, l);
int height = height0 / pow(2, l);
float minSse = 100;
// Downscale corner from the previous frame.
int lx = (corners[i] / width0) * pow(2, -l);
int ly = (corners[i] % width0) * pow(2, -l);
int prevCorner = int(lx * width + ly);
if (l == levels - 1) {
currentCorners[i] = prevCorner;
} else {
// Upscale corner from the previous layer.
int ux = int(currentCorners[i] / (width * 0.5)) * 2;
int uy = (currentCorners[i] % int((width * 0.5))) * 2;
currentCorners[i] = int(ux * width + uy);
}
extractPatchOnDevice(prevPatch, prevPyramidalScales + l * offsetSize,
prevCorner, patchSide, width, height);
int x = (int)currentCorners[i] / width;
int y = currentCorners[i] % width;
for (int wi = 0; wi < windowSide * windowSide; wi++) {
int dx = ((int)wi / windowSide) - windowMargin;
int dy = (wi % windowSide) - windowMargin;
int di = (x + dx) * width + (y + dy);
extractPatchOnDevice(currPatch,
currPyramidalScales + l * offsetSize, di,
patchSide, width, height);
float sse =
sumOfSquareDifferencesOnDevice(prevPatch, currPatch, patchSide);
if (sse < minSse) {
currentCorners[i] = di;
minSse = sse;
}
}
}
delete[] prevPatch;
delete[] currPatch;
}
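// Illustrative launch sketch (hypothetical names): the kernel tracks one
// corner per thread, so maxCorners threads are enough; currPyramidalScales
// and prevPyramidalScales are assumed to be flattened pyramids where level l
// starts at offset l * offsetSize.
//
//   int threads = 64;
//   int blocks = (maxCorners + threads - 1) / threads;
//   opticalFLowOnDevice<<<blocks, threads>>>(d_currCorners, d_corners, maxCorners,
//                                            d_currPyr, d_prevPyr, levels,
//                                            offsetSize, width0, height0);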
void rotateOnHost(unsigned char *dst, unsigned char *src, const double radian,
const int width, const int height, const int channels) {
for (int y = 0; y < width; y++) {
for (int x = 0; x < height; x++) {
// Evaluate the source pixels.
int x_center = x - round(height / 2.0);
int y_center = y - round(width / 2.0);
double xa = x_center * cos(-radian) - y_center * sin(-radian) +
round(height / 2.0);
double ya = x_center * sin(-radian) + y_center * cos(-radian) +
round(width / 2.0);
// Check for out-of-bound coordinates.
if (xa < 0 or xa > height or ya < 0 or ya > width) {
// Set pixels to black and exit
for (int c = 0; c < channels; c++) {
int ib = channels * (x * width + y) + c;
dst[ib] = 0;
}
continue;
}
for (int c = 0; c < channels; c++) {
int ib = channels * (x * width + y) + c;
// Evaluate the four pixels given xs and ys roundings.
int ia[4] = {
channels * (int(floor(xa)) * width + int(floor(ya))) + c,
channels * (int(floor(xa)) * width + int(ceil(ya))) + c,
channels * (int(ceil(xa)) * width + int(floor(ya))) + c,
channels * (int(ceil(xa)) * width + int(ceil(ya))) + c};
// Evaluate the average value of the destination pixel.
float sum = 0.0;
int count = 0;
for (int k = 0; k < 4; k++) {
                if (0 <= ia[k] and ia[k] < width * height * channels) {
sum += src[ia[k]];
count++;
}
}
dst[ib] = int(sum / count);
}
}
}
}
__global__ void rotateOnDevice(unsigned char *dst, unsigned char *src,
const double radian, const int width,
const int height, const int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height) {
return;
}
int x = (int)i / width;
int y = (i % width);
// Evaluate the source pixels.
int x_center = x - round(height / 2.0);
int y_center = y - round(width / 2.0);
double xa =
x_center * cos(-radian) - y_center * sin(-radian) + round(height / 2.0);
double ya =
x_center * sin(-radian) + y_center * cos(-radian) + round(width / 2.0);
// Check for out-of-bound coordinates.
if (xa < 0 or xa > height or ya < 0 or ya > width) {
// Set pixels to black and exit
for (int c = 0; c < channels; c++) {
int ib = channels * (x * width + y) + c;
dst[ib] = 0;
}
return;
}
for (int c = 0; c < channels; c++) {
int ib = channels * (x * width + y) + c;
// Evaluate the four pixels given xs and ys roundings.
int ia[4] = {channels * (int(floor(xa)) * width + int(floor(ya))) + c,
channels * (int(floor(xa)) * width + int(ceil(ya))) + c,
channels * (int(ceil(xa)) * width + int(floor(ya))) + c,
channels * (int(ceil(xa)) * width + int(ceil(ya))) + c};
// Evaluate the average value of the destination pixel.
float sum = 0.0;
int count = 0;
for (int k = 0; k < 4; k++) {
            if (0 <= ia[k] and ia[k] < width * height * channels) {
sum += src[ia[k]];
count++;
}
}
dst[ib] = int(sum / count);
}
}
void scaleOnHost(unsigned char *dst, unsigned char *src, const double ratio,
const int width, const int height, const int channels) {
int newWidth = width * ratio;
int newHeight = height * ratio;
float inverseRatio = 1.0 / ratio;
for (int y = 0; y < newWidth; y++) {
for (int x = 0; x < newHeight; x++) {
for (int c = 0; c < channels; c++) {
int i = (x * newWidth + y) * channels + c;
float tempValue = 0.0;
for (int dy = -1; dy < 2; dy++) {
for (int dx = -1; dx < 2; dx++) {
int oldI = ((int(inverseRatio * x) + dx) * width +
(int(inverseRatio * y) + dy)) *
channels +
c;
float weight = 1 / (pow(2, 2 + abs(dx) + abs(dy)));
if (oldI < 0 or oldI > width * height * channels) {
continue;
}
tempValue += weight * src[oldI];
}
}
dst[i] = tempValue;
}
}
}
}
__global__ void scaleOnDevice(unsigned char *dst, unsigned char *src,
const double ratio, const int width,
const int height, const int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int newWidth = width * ratio;
int newHeight = height * ratio;
float inverseRatio = 1.0 / ratio;
// Check for overflow.
    if (i >= newWidth * newHeight) {
return;
}
int x = (int)i / newWidth;
int y = (i % newWidth);
for (int c = 0; c < channels; c++) {
float tempValue = 0.0;
for (int dy = -1; dy < 2; dy++) {
for (int dx = -1; dx < 2; dx++) {
int src_i = ((int(inverseRatio * x) + dx) * width +
(int(inverseRatio * y) + dy)) *
channels +
c;
float weight = 1 / (pow(2, 2 + abs(dx) + abs(dy)));
if (src_i < 0 or src_i > width * height * channels) {
continue;
}
tempValue += weight * src[src_i];
}
}
dst[i * channels + c] = tempValue;
}
}
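// Illustrative launch sketch (hypothetical names): one thread per destination
// pixel. dst is assumed to be allocated for the scaled size, i.e.
// int(width * ratio) * int(height * ratio) * channels bytes.
//
//   int newW = int(width * ratio), newH = int(height * ratio);
//   int threads = 256;
//   int blocks = (newW * newH + threads - 1) / threads;
//   scaleOnDevice<<<blocks, threads>>>(d_dst, d_src, ratio, width, height, channels);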
void translateOnHost(unsigned char *dst, unsigned char *src, int px, int py,
const int width, const int height, const int channels) {
for (int y = 0; y < width; y++) {
for (int x = 0; x < height; x++) {
// Evaluate the source pixels.
int xa = x - px;
int ya = y - py;
// Check for out-of-bound coordinates.
            if (xa < 0 or xa >= height or ya < 0 or ya >= width) {
// Set pixels to black and exit
for (int c = 0; c < channels; c++) {
int ib = channels * (x * width + y) + c;
dst[ib] = 0;
}
continue;
}
for (int c = 0; c < channels; c++) {
int ia = channels * (xa * width + ya) + c;
int ib = channels * (x * width + y) + c;
dst[ib] = src[ia];
}
}
}
}
__global__ void translateOnDevice(unsigned char *dst, unsigned char *src,
int px, int py, const int width,
const int height, const int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height) {
return;
}
int x = (int)i / width;
int y = (i % width);
// Evaluate the source pixels.
int xa = x - px;
int ya = y - py;
// Check for out-of-bound coordinates.
    if (xa < 0 or xa >= height or ya < 0 or ya >= width) {
// Set pixels to black and exit.
for (int c = 0; c < channels; c++) {
int ib = channels * (x * width + y) + c;
dst[ib] = 0;
}
return;
}
for (int c = 0; c < channels; c++) {
int ia = channels * (xa * width + ya) + c;
int ib = channels * (x * width + y) + c;
dst[ib] = src[ia];
}
}
void transposeOnHost(unsigned char *data, const int width, const int height,
const int channels) {
for (int y = 0; y < width; y++) {
for (int x = 0; x < height; x++) {
for (int c = 0; c < channels; c++) {
int ia = channels * (y * width + x) + c;
int ib = channels * (x * height + y) + c;
if (ia > ib) {
continue;
}
unsigned char temp = data[ib];
data[ib] = data[ia];
data[ia] = temp;
}
}
}
}
__global__ void transposeOnDevice(unsigned char *data, const int width,
const int height, const int channels) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Check for overflow.
if (i >= width * height) {
return;
}
for (int c = 0; c < channels; c++) {
int ia = channels * i + c;
int ib = channels * ((i % width) * height + ((int)i / width)) + c;
if (ia > ib) {
continue;
}
unsigned char temp = data[ib];
data[ib] = data[ia];
data[ia] = temp;
}
}
void sumOfMatmulOnHost(float *total, float *A, float *B, int side) {
float *C = new float[side * side];
*total = 0;
for (int i = 0; i < side * side; i++) {
int x = (int)i / side;
int y = (i % side);
C[i] = 0;
for (int d = 0; d < side; d++) {
int ia = x * side + d;
int ib = d * side + y;
C[i] += A[ia] * B[ib];
}
*total += C[i];
}
delete[] C;
}
__device__ void sumOfMatmulOnDevice(float *total, float *A, float *B,
int side) {
*total = 0;
for (int i = 0; i < side * side; i++) {
int x = (int)i / side;
int y = (i % side);
for (int d = 0; d < side; d++) {
int ia = x * side + d;
int ib = d * side + y;
*total += A[ia] * B[ib];
}
}
}
float sumOfSquareDifferencesOnHost(unsigned char *patch1, unsigned char *patch2,
int patchSide) {
float sse = 0.0;
for (int i = 0; i < patchSide * patchSide; i++) {
sse += pow(float(patch1[i] - patch2[i]), 2);
}
return sse;
}
__device__ float sumOfSquareDifferencesOnDevice(unsigned char *patch1,
unsigned char *patch2,
int patchSide) {
float sse = 0.0;
for (int i = 0; i < patchSide * patchSide; i++) {
sse += pow(float(patch1[i] - patch2[i]), 2);
}
return sse;
}
void extractPatchOnHost(unsigned char *patch, unsigned char *data,
int centerIndex, int patchSide, int width, int height) {
const int patchMargin = int((patchSide - 1) / 2);
for (int pi = 0; pi < patchSide * patchSide; pi++) {
int x = (int)centerIndex / width;
int y = centerIndex % width;
int dx = ((int)pi / patchSide) - patchMargin;
int dy = (pi % patchSide) - patchMargin;
int di = (x + dx) * width + (y + dy);
if (di < 0 or di > width * height) {
patch[pi] = 0;
} else {
patch[pi] = data[di];
}
}
}
__device__ void extractPatchOnDevice(unsigned char *patch, unsigned char *data,
int centerIndex, int patchSide, int width,
int height) {
const int patchMargin = int((patchSide - 1) / 2);
for (int pi = 0; pi < patchSide * patchSide; pi++) {
int x = (int)centerIndex / width;
int y = centerIndex % width;
int dx = ((int)pi / patchSide) - patchMargin;
int dy = (pi % patchSide) - patchMargin;
int di = (x + dx) * width + (y + dy);
if (di < 0 or di > width * height) {
patch[pi] = 0;
} else {
patch[pi] = data[di];
}
}
}
void findHomographyRANSACOnHost(float *matrices, float *scores, int maxIter,
int *currentCorners, int *previousCorners,
int maxCorners, int width, int height,
float thresholdError, float minConfidence) {
std::random_device rd;
std::mt19937 gen(rd());
    std::uniform_int_distribution<> uniform(0, maxCorners - 1);
const int N_POINTS = 3;
const int SPACE_DIM = 2;
// Create maxIter models.
float *srcTriplet = new float[N_POINTS * SPACE_DIM];
float *dstTriplet = new float[N_POINTS * SPACE_DIM];
float *estPoint = new float[SPACE_DIM];
float *srcPoint = new float[SPACE_DIM];
float *dstPoint = new float[SPACE_DIM];
for (int n = 0; n < maxIter; n++) {
int offset = n * (N_POINTS * (SPACE_DIM + 1));
scores[n] = INFINITY;
// Select the minimum number of data points to estimate a model.
for (int k = 0; k < N_POINTS; k++) {
int i = uniform(gen);
srcTriplet[k * SPACE_DIM] = (int)previousCorners[i] / width;
srcTriplet[k * SPACE_DIM + 1] = previousCorners[i] % width;
dstTriplet[k * SPACE_DIM] = (int)currentCorners[i] / width;
dstTriplet[k * SPACE_DIM + 1] = currentCorners[i] % width;
}
// Estimate the model that fit the hypothetical inliers.
estimateTransformOnHost(matrices + offset, srcTriplet, dstTriplet);
// Count the points that fit the model and the total error.
int nInliers = 0;
float totalError = 0.0;
for (int i = 0; i < maxCorners; i++) {
srcPoint[0] = (int)previousCorners[i] / width;
srcPoint[1] = previousCorners[i] % width;
dstPoint[0] = (int)currentCorners[i] / width;
dstPoint[1] = currentCorners[i] % width;
// Apply the transform and evaluate the error.
applyTransformOnHost(estPoint, srcPoint, matrices + offset);
float reprojError = pow(int(estPoint[0] - dstPoint[0]), 2) +
pow(int(estPoint[1] - dstPoint[1]), 2);
nInliers += int(reprojError < thresholdError);
totalError += reprojError;
}
// Set the matrix score to the error if the confidence is high
// enough.
float confidence = (float)nInliers / maxCorners;
if (confidence >= minConfidence) {
scores[n] = totalError;
}
}
delete[] srcTriplet;
delete[] dstTriplet;
delete[] estPoint;
delete[] srcPoint;
delete[] dstPoint;
}
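// Illustrative follow-up (an assumption about how the outputs are consumed):
// each RANSAC iteration writes a 3x3 model at matrices + n * 9 and its error
// in scores[n]; the usual next step is to keep the model with the lowest
// score.
//
//   int best = 0;
//   for (int n = 1; n < maxIter; n++) {
//       if (scores[n] < scores[best]) best = n;
//   }
//   float *bestModel = matrices + best * 9;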
__global__ void findHomographyRANSACOnDevice(
float *matrices, float *scores, int maxIter, int *currentCorners,
int *previousCorners, int maxCorners, int *randomCornerIndices, int width,
int height, float thresholdError, float minConfidence) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= maxIter) {
return;
}
const int N_POINTS = 3;
const int SPACE_DIM = 2;
// Create maxIter models.
float *srcTriplet = new float[N_POINTS * SPACE_DIM];
float *dstTriplet = new float[N_POINTS * SPACE_DIM];
float *estPoint = new float[SPACE_DIM];
float *srcPoint = new float[SPACE_DIM];
float *dstPoint = new float[SPACE_DIM];
int offset = i * (N_POINTS * (SPACE_DIM + 1));
scores[i] = INFINITY;
// Select the minimum number of data points to estimate a model.
for (int k = 0; k < N_POINTS; k++) {
int index = randomCornerIndices[N_POINTS * i + k];
srcTriplet[k * SPACE_DIM] = (int)previousCorners[index] / width;
srcTriplet[k * SPACE_DIM + 1] = previousCorners[index] % width;
dstTriplet[k * SPACE_DIM] = (int)currentCorners[index] / width;
dstTriplet[k * SPACE_DIM + 1] = currentCorners[index] % width;
}
// Estimate the model that fit the hypothetical inliers.
estimateTransformOnDevice(matrices + offset, srcTriplet, dstTriplet);
// Count the points that fit the model and the total error.
int nInliers = 0;
float totalError = 0.0;
for (int index = 0; index < maxCorners; index++) {
srcPoint[0] = (int)previousCorners[index] / width;
srcPoint[1] = previousCorners[index] % width;
dstPoint[0] = (int)currentCorners[index] / width;
dstPoint[1] = currentCorners[index] % width;
// Apply the transform and evaluate the error.
applyTransformOnDevice(estPoint, srcPoint, matrices + offset);
float reprojError = pow(int(estPoint[0] - dstPoint[0]), 2) +
pow(int(estPoint[1] - dstPoint[1]), 2);
nInliers += int(reprojError < thresholdError);
totalError += reprojError;
}
// Set the matrix score to the error if the confidence is high
// enough.
float confidence = (float)nInliers / maxCorners;
if (confidence >= minConfidence) {
scores[i] = totalError;
}
delete[] srcTriplet;
delete[] dstTriplet;
delete[] estPoint;
delete[] srcPoint;
delete[] dstPoint;
}
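// Illustrative host-side preparation (hypothetical names): unlike the host
// version, the kernel draws no random numbers itself, so randomCornerIndices
// must be filled with N_POINTS (= 3) indices per iteration, each in
// [0, maxCorners - 1], before the launch.
//
//   std::vector<int> h_idx(3 * maxIter);
//   std::mt19937 gen(std::random_device{}());
//   std::uniform_int_distribution<> pick(0, maxCorners - 1);
//   for (int &v : h_idx) v = pick(gen);
//   cudaMemcpy(d_randomCornerIndices, h_idx.data(),
//              h_idx.size() * sizeof(int), cudaMemcpyHostToDevice);
//   findHomographyRANSACOnDevice<<<(maxIter + 63) / 64, 64>>>(
//       d_matrices, d_scores, maxIter, d_currCorners, d_prevCorners,
//       maxCorners, d_randomCornerIndices, width, height, thresholdError,
//       minConfidence);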
void estimateTransformOnHost(float *A, float *Ui, float *vi) {
const int N_POINTS = 3;
const int SPACE_DIM = 2;
// Create X and Y matrices.
float *X = new float[N_POINTS * (SPACE_DIM + 1)];
float *Y = new float[N_POINTS * (SPACE_DIM + 1)];
for (int d = 0; d < SPACE_DIM + 1; d++) {
for (int n = 0; n < N_POINTS; n++) {
int i = d * (N_POINTS) + n;
int j = n * (SPACE_DIM) + d;
if (d == SPACE_DIM) {
X[i] = 1;
Y[i] = int(n >= N_POINTS - 1);
} else {
X[i] = Ui[j];
Y[i] = vi[j];
}
}
}
float *Xi = new float[N_POINTS * (SPACE_DIM + 1)];
invert3x3MatrixOnHost(Xi, X);
// Get the affine transformation matrix.
matmulOnHost(A, Y, Xi, N_POINTS);
delete[] X;
delete[] Y;
delete[] Xi;
}
__device__ void estimateTransformOnDevice(float *A, float *Ui, float *vi) {
const int N_POINTS = 3;
const int SPACE_DIM = 2;
// Create X and Y matrices.
float *X = new float[N_POINTS * (SPACE_DIM + 1)];
float *Y = new float[N_POINTS * (SPACE_DIM + 1)];
for (int d = 0; d < SPACE_DIM + 1; d++) {
for (int n = 0; n < N_POINTS; n++) {
int i = d * (N_POINTS) + n;
int j = n * (SPACE_DIM) + d;
if (d == SPACE_DIM) {
X[i] = 1;
Y[i] = int(n >= N_POINTS - 1);
} else {
X[i] = Ui[j];
Y[i] = vi[j];
}
}
}
float *Xi = new float[N_POINTS * (SPACE_DIM + 1)];
invert3x3MatrixOnDevice(Xi, X);
// Get the affine transformation matrix.
matmulOnDevice(A, Y, Xi, N_POINTS);
delete[] X;
delete[] Y;
delete[] Xi;
}
void invert3x3MatrixOnHost(float *Xi, float *X) {
float det = X[0] * (X[4] * X[8] - X[5] * X[7]) -
X[1] * (X[3] * X[8] - X[5] * X[6]) +
X[2] * (X[3] * X[7] - X[4] * X[6]);
Xi[0] = +float(X[4] * X[8] - X[5] * X[7]) / det;
Xi[1] = -float(X[1] * X[8] - X[2] * X[7]) / det;
Xi[2] = +float(X[1] * X[5] - X[2] * X[4]) / det;
Xi[3] = -float(X[3] * X[8] - X[5] * X[6]) / det;
Xi[4] = +float(X[0] * X[8] - X[2] * X[6]) / det;
Xi[5] = -float(X[0] * X[5] - X[2] * X[3]) / det;
Xi[6] = +float(X[3] * X[7] - X[4] * X[6]) / det;
Xi[7] = -float(X[0] * X[7] - X[1] * X[6]) / det;
Xi[8] = +float(X[0] * X[4] - X[1] * X[3]) / det;
}
__device__ void invert3x3MatrixOnDevice(float *Xi, float *X) {
float det = X[0] * (X[4] * X[8] - X[5] * X[7]) -
X[1] * (X[3] * X[8] - X[5] * X[6]) +
X[2] * (X[3] * X[7] - X[4] * X[6]);
Xi[0] = +float(X[4] * X[8] - X[5] * X[7]) / det;
Xi[1] = -float(X[1] * X[8] - X[2] * X[7]) / det;
Xi[2] = +float(X[1] * X[5] - X[2] * X[4]) / det;
Xi[3] = -float(X[3] * X[8] - X[5] * X[6]) / det;
Xi[4] = +float(X[0] * X[8] - X[2] * X[6]) / det;
Xi[5] = -float(X[0] * X[5] - X[2] * X[3]) / det;
Xi[6] = +float(X[3] * X[7] - X[4] * X[6]) / det;
Xi[7] = -float(X[0] * X[7] - X[1] * X[6]) / det;
Xi[8] = +float(X[0] * X[4] - X[1] * X[3]) / det;
}
void matmulOnHost(float *C, float *A, float *B, int side) {
for (int i = 0; i < side * side; i++) {
int x = (int)i / side;
int y = (i % side);
C[i] = 0;
for (int d = 0; d < side; d++) {
int ia = x * side + d;
int ib = d * side + y;
C[i] += A[ia] * B[ib];
}
}
}
__device__ void matmulOnDevice(float *C, float *A, float *B, int side) {
for (int i = 0; i < side * side; i++) {
int x = (int)i / side;
int y = (i % side);
C[i] = 0;
for (int d = 0; d < side; d++) {
int ia = x * side + d;
int ib = d * side + y;
C[i] += A[ia] * B[ib];
}
}
}
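// Note on the two helpers below: they apply only the 2x2 linear part of the
// 3x3 affine matrix A to the point; the translation column A[i * 3 + 2] is
// not added to the result.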
void applyTransformOnHost(float *dst, float *src, float *A) {
const int SPACE_DIM = 2;
for (int i = 0; i < SPACE_DIM; i++) {
dst[i] = 0.0;
dst[i] += src[0] * A[i * 3 + 0];
dst[i] += src[1] * A[i * 3 + 1];
}
}
__device__ void applyTransformOnDevice(float *dst, float *src, float *A) {
const int SPACE_DIM = 2;
for (int i = 0; i < SPACE_DIM; i++) {
dst[i] = 0.0;
dst[i] += src[0] * A[i * 3 + 0];
dst[i] += src[1] * A[i * 3 + 1];
}
}
|
functions.cuh
|
#ifndef RBC_FUNCTIONS_CUH
#define RBC_FUNCTIONS_CUH
#include <cstddef>
#include <cmath>
#include <cstdio>
#include "kernels.cuh"
#include "getters.cuh"
#include "Lock.cuh"
__global__ void rhs(REAL *v_out, REAL *v_in, size_t N, REAL *f_x, REAL *f_y, REAL mass = 1.0, REAL betta = 1.0);
__global__ void calculate_area(Lock lock, REAL *area, REAL *var, size_t N);
inline __host__ __device__
REAL length(REAL x1, REAL y1, REAL x2, REAL y2) {
return std::sqrt((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2));
}
inline __host__ __device__
int sign(REAL x) {
return (x > 0) - (x < 0);
}
#endif // RBC_FUNCTIONS_CUH
|
54b9dadcd58197e8898ebf8fb97db49d193afa25.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include <iostream>
#include <stdio.h>
#define BLOCK_SIZE 16
using namespace std;
typedef struct {
int width;
int height;
int stride;
int* elements;
} Matrix;
typedef struct {
int width;
int* elements;
} Vector;
__device__ float GetElement(const Matrix A, int row, int col)
{
return A.elements[row * A.stride + col];
}
__device__ void SetElement(Matrix A, int row, int col, float value)
{
A.elements[row * A.stride + col] = value;
}
void print_matrix(const Matrix A) {
int i;
int size = A.width * A.height;
cout<<"size A "<<size<<endl;
cout<<" MATRIX \n";
for(i = 1; i <= size; i++) {
cout<<A.elements[i-1]<<" ";
if(i % 10 == 0) {
cout<<"\n";
}
}
}
__global__ void macierz_wektor_10_kernel(const Matrix, const Vector, Vector);
void macierz_wektor_10()
{
//create Matrix and Vector on Host (CPU)
Matrix A;
Vector B;
Vector C;
A.width = A.height = A.stride = 10;
size_t size_A = A.width * A.height * sizeof(int);
A.elements = (int*) malloc(size_A);
B.width = 10;
size_t size_B = B.width * sizeof(int);
B.elements = (int*) malloc(size_B);
C.width = 10;
size_t size_C = C.width * sizeof(int);
C.elements = (int*) malloc(size_C);
int i;
for(i = 0; i < A.width*A.height; i++) {
A.elements[i] = (i % 10) + 1;
}
print_matrix(A);
for(i = 0; i < B.width; i++) {
B.elements[i] = (i % 10) + 1;
}
//Load A and B to device memory
Matrix d_A;
d_A.width = d_A.stride = A.width; d_A.height = A.height;
hipMalloc(&d_A.elements, size_A);
hipMemcpy(d_A.elements, A.elements, size_A, hipMemcpyHostToDevice);
Vector d_B;
d_B.width = B.width;
hipMalloc(&d_B.elements, size_B);
hipMemcpy(d_B.elements, B.elements, size_B, hipMemcpyHostToDevice);
Vector d_C;
d_C.width = C.width;
hipMalloc(&d_C.elements, size_C);
dim3 dimBlock(10, 1);
dim3 dimGrid(1);
hipLaunchKernelGGL(( macierz_wektor_10_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
hipMemcpy(C.elements, d_C.elements, size_C, hipMemcpyDeviceToHost);
for(i = 0; i < C.width; i++) {
cout<<C.elements[i]<<" ";
}
cout<<endl;
}
__global__ void macierz_wektor_10_kernel(const Matrix A, const Vector B, Vector C) {
int col = threadIdx.x;
printf("thread_id_x %d", threadIdx.x);
int vec_val = B.elements[col];
int mul = 0;
int row;
for(row = 0; row < A.height; row++) {
mul += vec_val * A.elements[row*A.width + col];
}
C.elements[col] = mul;
__syncthreads();
}
int main()
{
macierz_wektor_10();
return 0;
}
|
54b9dadcd58197e8898ebf8fb97db49d193afa25.cu
|
#include <cuda_runtime_api.h>
#include <iostream>
#include <stdio.h>
#define BLOCK_SIZE 16
using namespace std;
typedef struct {
int width;
int height;
int stride;
int* elements;
} Matrix;
typedef struct {
int width;
int* elements;
} Vector;
__device__ float GetElement(const Matrix A, int row, int col)
{
return A.elements[row * A.stride + col];
}
__device__ void SetElement(Matrix A, int row, int col, float value)
{
A.elements[row * A.stride + col] = value;
}
void print_matrix(const Matrix A) {
int i;
int size = A.width * A.height;
cout<<"size A "<<size<<endl;
cout<<" MATRIX \n";
for(i = 1; i <= size; i++) {
cout<<A.elements[i-1]<<" ";
if(i % 10 == 0) {
cout<<"\n";
}
}
}
__global__ void macierz_wektor_10_kernel(const Matrix, const Vector, Vector);
void macierz_wektor_10()
{
//create Matrix and Vector on Host (CPU)
Matrix A;
Vector B;
Vector C;
A.width = A.height = A.stride = 10;
size_t size_A = A.width * A.height * sizeof(int);
A.elements = (int*) malloc(size_A);
B.width = 10;
size_t size_B = B.width * sizeof(int);
B.elements = (int*) malloc(size_B);
C.width = 10;
size_t size_C = C.width * sizeof(int);
C.elements = (int*) malloc(size_C);
int i;
for(i = 0; i < A.width*A.height; i++) {
A.elements[i] = (i % 10) + 1;
}
print_matrix(A);
for(i = 0; i < B.width; i++) {
B.elements[i] = (i % 10) + 1;
}
//Load A and B to device memory
Matrix d_A;
d_A.width = d_A.stride = A.width; d_A.height = A.height;
cudaMalloc(&d_A.elements, size_A);
cudaMemcpy(d_A.elements, A.elements, size_A, cudaMemcpyHostToDevice);
Vector d_B;
d_B.width = B.width;
cudaMalloc(&d_B.elements, size_B);
cudaMemcpy(d_B.elements, B.elements, size_B, cudaMemcpyHostToDevice);
Vector d_C;
d_C.width = C.width;
cudaMalloc(&d_C.elements, size_C);
dim3 dimBlock(10, 1);
dim3 dimGrid(1);
macierz_wektor_10_kernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
cudaMemcpy(C.elements, d_C.elements, size_C, cudaMemcpyDeviceToHost);
for(i = 0; i < C.width; i++) {
cout<<C.elements[i]<<" ";
}
cout<<endl;
}
__global__ void macierz_wektor_10_kernel(const Matrix A, const Vector B, Vector C) {
int col = threadIdx.x;
printf("thread_id_x %d", threadIdx.x);
int vec_val = B.elements[col];
int mul = 0;
int row;
for(row = 0; row < A.height; row++) {
mul += vec_val * A.elements[row*A.width + col];
}
C.elements[col] = mul;
__syncthreads();
}
int main()
{
macierz_wektor_10();
return 0;
}
|
62f677cb44fb5887f73a812ab344d2a3d6cf747d.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/decomposition/params.hpp>
#include <gtest/gtest.h>
#include <raft/core/handle.hpp>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
#include <test_utils.h>
#include <tsvd/tsvd.cuh>
#include <vector>
namespace ML {
template <typename T>
struct TsvdInputs {
T tolerance;
int n_row;
int n_col;
int n_row2;
int n_col2;
float redundancy;
unsigned long long int seed;
int algo;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const TsvdInputs<T>& dims)
{
return os;
}
template <typename T>
class TsvdTest : public ::testing::TestWithParam<TsvdInputs<T>> {
public:
TsvdTest()
: params(::testing::TestWithParam<TsvdInputs<T>>::GetParam()),
stream(handle.get_stream()),
components(0, stream),
components_ref(0, stream),
data2(0, stream),
data2_back(0, stream)
{
basicTest();
advancedTest();
}
protected:
void basicTest()
{
raft::random::Rng r(params.seed, raft::random::GenPC);
int len = params.n_row * params.n_col;
rmm::device_uvector<T> data(len, stream);
std::vector<T> data_h = {1.0, 2.0, 4.0, 2.0, 4.0, 5.0, 5.0, 4.0, 2.0, 1.0, 6.0, 4.0};
data_h.resize(len);
raft::update_device(data.data(), data_h.data(), len, stream);
int len_comp = params.n_col * params.n_col;
components.resize(len_comp, stream);
rmm::device_uvector<T> singular_vals(params.n_col, stream);
std::vector<T> components_ref_h = {
-0.3951, 0.1532, 0.9058, -0.7111, -0.6752, -0.1959, -0.5816, 0.7215, -0.3757};
components_ref_h.resize(len_comp);
components_ref.resize(len_comp, stream);
raft::update_device(components_ref.data(), components_ref_h.data(), len_comp, stream);
paramsTSVD prms;
prms.n_cols = params.n_col;
prms.n_rows = params.n_row;
prms.n_components = params.n_col;
if (params.algo == 0)
prms.algorithm = solver::COV_EIG_DQ;
else
prms.algorithm = solver::COV_EIG_JACOBI;
tsvdFit(handle, data.data(), components.data(), singular_vals.data(), prms, stream);
}
void advancedTest()
{
raft::random::Rng r(params.seed, raft::random::GenPC);
int len = params.n_row2 * params.n_col2;
paramsTSVD prms;
prms.n_cols = params.n_col2;
prms.n_rows = params.n_row2;
prms.n_components = params.n_col2;
if (params.algo == 0)
prms.algorithm = solver::COV_EIG_DQ;
else if (params.algo == 1)
prms.algorithm = solver::COV_EIG_JACOBI;
else
prms.n_components = params.n_col2 - 15;
data2.resize(len, stream);
int redundant_cols = int(params.redundancy * params.n_col2);
int redundant_len = params.n_row2 * redundant_cols;
int informative_cols = params.n_col2 - redundant_cols;
int informative_len = params.n_row2 * informative_cols;
r.uniform(data2.data(), informative_len, T(-1.0), T(1.0), stream);
RAFT_CUDA_TRY(hipMemcpyAsync(data2.data() + informative_len,
data2.data(),
redundant_len * sizeof(T),
hipMemcpyDeviceToDevice,
stream));
rmm::device_uvector<T> data2_trans(prms.n_rows * prms.n_components, stream);
int len_comp = params.n_col2 * prms.n_components;
rmm::device_uvector<T> components2(len_comp, stream);
rmm::device_uvector<T> explained_vars2(prms.n_components, stream);
rmm::device_uvector<T> explained_var_ratio2(prms.n_components, stream);
rmm::device_uvector<T> singular_vals2(prms.n_components, stream);
tsvdFitTransform(handle,
data2.data(),
data2_trans.data(),
components2.data(),
explained_vars2.data(),
explained_var_ratio2.data(),
singular_vals2.data(),
prms,
stream);
data2_back.resize(len, stream);
tsvdInverseTransform(
handle, data2_trans.data(), components2.data(), data2_back.data(), prms, stream);
}
protected:
raft::handle_t handle;
hipStream_t stream = 0;
TsvdInputs<T> params;
rmm::device_uvector<T> components, components_ref, data2, data2_back;
};
const std::vector<TsvdInputs<float>> inputsf2 = {{0.01f, 4, 3, 1024, 128, 0.25f, 1234ULL, 0},
{0.01f, 4, 3, 1024, 128, 0.25f, 1234ULL, 1},
{0.04f, 4, 3, 512, 64, 0.25f, 1234ULL, 2},
{0.04f, 4, 3, 512, 64, 0.25f, 1234ULL, 2}};
const std::vector<TsvdInputs<double>> inputsd2 = {{0.01, 4, 3, 1024, 128, 0.25f, 1234ULL, 0},
{0.01, 4, 3, 1024, 128, 0.25f, 1234ULL, 1},
{0.05, 4, 3, 512, 64, 0.25f, 1234ULL, 2},
{0.05, 4, 3, 512, 64, 0.25f, 1234ULL, 2}};
typedef TsvdTest<float> TsvdTestLeftVecF;
TEST_P(TsvdTestLeftVecF, Result)
{
ASSERT_TRUE(MLCommon::devArrMatch(components.data(),
components_ref.data(),
(params.n_col * params.n_col),
MLCommon::CompareApproxAbs<float>(params.tolerance),
handle.get_stream()));
}
typedef TsvdTest<double> TsvdTestLeftVecD;
TEST_P(TsvdTestLeftVecD, Result)
{
ASSERT_TRUE(MLCommon::devArrMatch(components.data(),
components_ref.data(),
(params.n_col * params.n_col),
MLCommon::CompareApproxAbs<double>(params.tolerance),
handle.get_stream()));
}
typedef TsvdTest<float> TsvdTestDataVecF;
TEST_P(TsvdTestDataVecF, Result)
{
ASSERT_TRUE(MLCommon::devArrMatch(data2.data(),
data2_back.data(),
(params.n_col2 * params.n_col2),
MLCommon::CompareApproxAbs<float>(params.tolerance),
handle.get_stream()));
}
typedef TsvdTest<double> TsvdTestDataVecD;
TEST_P(TsvdTestDataVecD, Result)
{
ASSERT_TRUE(MLCommon::devArrMatch(data2.data(),
data2_back.data(),
(params.n_col2 * params.n_col2),
MLCommon::CompareApproxAbs<double>(params.tolerance),
handle.get_stream()));
}
INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestLeftVecF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestLeftVecD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestDataVecF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestDataVecD, ::testing::ValuesIn(inputsd2));
} // end namespace ML
|
62f677cb44fb5887f73a812ab344d2a3d6cf747d.cu
|
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/decomposition/params.hpp>
#include <gtest/gtest.h>
#include <raft/core/handle.hpp>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
#include <test_utils.h>
#include <tsvd/tsvd.cuh>
#include <vector>
namespace ML {
template <typename T>
struct TsvdInputs {
T tolerance;
int n_row;
int n_col;
int n_row2;
int n_col2;
float redundancy;
unsigned long long int seed;
int algo;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const TsvdInputs<T>& dims)
{
return os;
}
template <typename T>
class TsvdTest : public ::testing::TestWithParam<TsvdInputs<T>> {
public:
TsvdTest()
: params(::testing::TestWithParam<TsvdInputs<T>>::GetParam()),
stream(handle.get_stream()),
components(0, stream),
components_ref(0, stream),
data2(0, stream),
data2_back(0, stream)
{
basicTest();
advancedTest();
}
protected:
void basicTest()
{
raft::random::Rng r(params.seed, raft::random::GenPC);
int len = params.n_row * params.n_col;
rmm::device_uvector<T> data(len, stream);
std::vector<T> data_h = {1.0, 2.0, 4.0, 2.0, 4.0, 5.0, 5.0, 4.0, 2.0, 1.0, 6.0, 4.0};
data_h.resize(len);
raft::update_device(data.data(), data_h.data(), len, stream);
int len_comp = params.n_col * params.n_col;
components.resize(len_comp, stream);
rmm::device_uvector<T> singular_vals(params.n_col, stream);
std::vector<T> components_ref_h = {
-0.3951, 0.1532, 0.9058, -0.7111, -0.6752, -0.1959, -0.5816, 0.7215, -0.3757};
components_ref_h.resize(len_comp);
components_ref.resize(len_comp, stream);
raft::update_device(components_ref.data(), components_ref_h.data(), len_comp, stream);
paramsTSVD prms;
prms.n_cols = params.n_col;
prms.n_rows = params.n_row;
prms.n_components = params.n_col;
if (params.algo == 0)
prms.algorithm = solver::COV_EIG_DQ;
else
prms.algorithm = solver::COV_EIG_JACOBI;
tsvdFit(handle, data.data(), components.data(), singular_vals.data(), prms, stream);
}
void advancedTest()
{
raft::random::Rng r(params.seed, raft::random::GenPC);
int len = params.n_row2 * params.n_col2;
paramsTSVD prms;
prms.n_cols = params.n_col2;
prms.n_rows = params.n_row2;
prms.n_components = params.n_col2;
if (params.algo == 0)
prms.algorithm = solver::COV_EIG_DQ;
else if (params.algo == 1)
prms.algorithm = solver::COV_EIG_JACOBI;
else
prms.n_components = params.n_col2 - 15;
data2.resize(len, stream);
int redundant_cols = int(params.redundancy * params.n_col2);
int redundant_len = params.n_row2 * redundant_cols;
int informative_cols = params.n_col2 - redundant_cols;
int informative_len = params.n_row2 * informative_cols;
r.uniform(data2.data(), informative_len, T(-1.0), T(1.0), stream);
RAFT_CUDA_TRY(cudaMemcpyAsync(data2.data() + informative_len,
data2.data(),
redundant_len * sizeof(T),
cudaMemcpyDeviceToDevice,
stream));
rmm::device_uvector<T> data2_trans(prms.n_rows * prms.n_components, stream);
int len_comp = params.n_col2 * prms.n_components;
rmm::device_uvector<T> components2(len_comp, stream);
rmm::device_uvector<T> explained_vars2(prms.n_components, stream);
rmm::device_uvector<T> explained_var_ratio2(prms.n_components, stream);
rmm::device_uvector<T> singular_vals2(prms.n_components, stream);
tsvdFitTransform(handle,
data2.data(),
data2_trans.data(),
components2.data(),
explained_vars2.data(),
explained_var_ratio2.data(),
singular_vals2.data(),
prms,
stream);
data2_back.resize(len, stream);
tsvdInverseTransform(
handle, data2_trans.data(), components2.data(), data2_back.data(), prms, stream);
}
protected:
raft::handle_t handle;
cudaStream_t stream = 0;
TsvdInputs<T> params;
rmm::device_uvector<T> components, components_ref, data2, data2_back;
};
const std::vector<TsvdInputs<float>> inputsf2 = {{0.01f, 4, 3, 1024, 128, 0.25f, 1234ULL, 0},
{0.01f, 4, 3, 1024, 128, 0.25f, 1234ULL, 1},
{0.04f, 4, 3, 512, 64, 0.25f, 1234ULL, 2},
{0.04f, 4, 3, 512, 64, 0.25f, 1234ULL, 2}};
const std::vector<TsvdInputs<double>> inputsd2 = {{0.01, 4, 3, 1024, 128, 0.25f, 1234ULL, 0},
{0.01, 4, 3, 1024, 128, 0.25f, 1234ULL, 1},
{0.05, 4, 3, 512, 64, 0.25f, 1234ULL, 2},
{0.05, 4, 3, 512, 64, 0.25f, 1234ULL, 2}};
typedef TsvdTest<float> TsvdTestLeftVecF;
TEST_P(TsvdTestLeftVecF, Result)
{
ASSERT_TRUE(MLCommon::devArrMatch(components.data(),
components_ref.data(),
(params.n_col * params.n_col),
MLCommon::CompareApproxAbs<float>(params.tolerance),
handle.get_stream()));
}
typedef TsvdTest<double> TsvdTestLeftVecD;
TEST_P(TsvdTestLeftVecD, Result)
{
ASSERT_TRUE(MLCommon::devArrMatch(components.data(),
components_ref.data(),
(params.n_col * params.n_col),
MLCommon::CompareApproxAbs<double>(params.tolerance),
handle.get_stream()));
}
typedef TsvdTest<float> TsvdTestDataVecF;
TEST_P(TsvdTestDataVecF, Result)
{
ASSERT_TRUE(MLCommon::devArrMatch(data2.data(),
data2_back.data(),
(params.n_col2 * params.n_col2),
MLCommon::CompareApproxAbs<float>(params.tolerance),
handle.get_stream()));
}
typedef TsvdTest<double> TsvdTestDataVecD;
TEST_P(TsvdTestDataVecD, Result)
{
ASSERT_TRUE(MLCommon::devArrMatch(data2.data(),
data2_back.data(),
(params.n_col2 * params.n_col2),
MLCommon::CompareApproxAbs<double>(params.tolerance),
handle.get_stream()));
}
INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestLeftVecF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestLeftVecD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestDataVecF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestDataVecD, ::testing::ValuesIn(inputsd2));
} // end namespace ML
|
9c76e25d49c7aeab76b4b7fbfe3eb944fa1ef419.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 15
#define F 28
#define ITERATIONS (unsigned)( 1200 )
#define ITERATIONS2 REPLACE_ITERATIONS
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
#define LINE_SIZE 128
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(int* A, int* C, int N){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
//int size = (LINE_SIZE*ASSOC*SETS)/sizeof(int);
//unsigned j=0, k=0;
int m_sum=N;
// m_sum = A[tid*F];
for (unsigned j=0; j<ITERATIONS2; j++){
for(unsigned k=0; k<ITERATIONS; ++k){
C[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))]=m_sum;
}
m_sum+=j;
__syncthreads();
 }
}
// Host code
int main(){
printf("Power Microbenchmarks\n");
//int N = LINE_SIZE*SETS*ASSOC;
unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
size_t size = N * sizeof(int);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
//checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
//checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
hipLaunchKernelGGL(( PowerKernal), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutStopTimer(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
getLastCudaError("kernel launch failure");
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
hipFree(d_A);
//if (d_B)
// hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
|
9c76e25d49c7aeab76b4b7fbfe3eb944fa1ef419.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 15
#define F 28
#define ITERATIONS (unsigned)( 1200 )
#define ITERATIONS2 REPLACE_ITERATIONS
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
#define LINE_SIZE 128
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(int* A, int* C, int N){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
//int size = (LINE_SIZE*ASSOC*SETS)/sizeof(int);
//unsigned j=0, k=0;
int m_sum=N;
// m_sum = A[tid*F];
for (unsigned j=0; j<ITERATIONS2; j++){
for(unsigned k=0; k<ITERATIONS; ++k){
C[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))]=m_sum;
}
m_sum+=j;
__syncthreads();
 }
}
// Host code
int main(){
printf("Power Microbenchmarks\n");
//int N = LINE_SIZE*SETS*ASSOC;
unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
size_t size = N * sizeof(int);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
//checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
//checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutStopTimer(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
getLastCudaError("kernel launch failure");
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
cudaFree(d_A);
//if (d_B)
// cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
|
15b44ff8fd6c19ea61ac7b3a3ea8b20b0989221b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_nearest_neighbor.cuh"
namespace device
{
__device__ void apply_nearest_neighbor(d_scale_params params, unsigned x, unsigned y)
{
auto y_pixel_front = params.y_precalculation_p[y].front_pixel;
auto x_pixel_front = params.x_precalculation_p[x].front_pixel;
auto x_pixel_rear = params.x_precalculation_p[x].rear_pixel;
auto x_front_weight = params.x_precalculation_p[x].front_weight;
auto x_rear_weight = params.x_precalculation_p[x].rear_weight;
auto base_upper_row = y_pixel_front * params.dimensions_info_p->source_image_width;
auto p_00_index = base_upper_row + x_pixel_front;
auto p_01_index = base_upper_row + x_pixel_rear;
auto* source_bytes = params.source_bytes_sequential_p;
png_byte pixel_p_value;
if (x_front_weight > x_rear_weight)
{
pixel_p_value = source_bytes[p_00_index];
}
else
{
pixel_p_value = source_bytes[p_01_index];
}
auto index_to_output_pixel = y * params.dimensions_info_p->result_image_width + x;
params.result_image_bytes_sequential_p[index_to_output_pixel] = pixel_p_value;
}
}
namespace global
{
__global__ void apply_nearest_neighbor(d_scale_params params, unsigned y_offset, unsigned y_max)
{
auto x = blockIdx.x * blockDim.x + threadIdx.x;
auto y = (blockIdx.y * blockDim.y + threadIdx.y) + y_offset;
if ((x >= params.dimensions_info_p->result_image_width) || (y >= y_max))
{
return;
}
auto y_pixel_front = params.y_precalculation_p[y].front_pixel;
auto x_pixel_front = params.x_precalculation_p[x].front_pixel;
auto x_pixel_rear = params.x_precalculation_p[x].rear_pixel;
auto x_front_weight = params.x_precalculation_p[x].front_weight;
auto x_rear_weight = params.x_precalculation_p[x].rear_weight;
auto base_upper_row = y_pixel_front * params.dimensions_info_p->source_image_width;
auto p_00_index = base_upper_row + x_pixel_front;
auto p_01_index = base_upper_row + x_pixel_rear;
auto* source_bytes = params.source_bytes_sequential_p;
png_byte pixel_p_value;
if (x_front_weight > x_rear_weight)
{
pixel_p_value = source_bytes[p_00_index];
}
else
{
pixel_p_value = source_bytes[p_01_index];
}
auto index_to_output_pixel = y * params.dimensions_info_p->result_image_width + x;
params.result_image_bytes_sequential_p[index_to_output_pixel] = pixel_p_value;
}
}
|
15b44ff8fd6c19ea61ac7b3a3ea8b20b0989221b.cu
|
#include "cuda_nearest_neighbor.cuh"
namespace device
{
__device__ void apply_nearest_neighbor(d_scale_params params, unsigned x, unsigned y)
{
auto y_pixel_front = params.y_precalculation_p[y].front_pixel;
auto x_pixel_front = params.x_precalculation_p[x].front_pixel;
auto x_pixel_rear = params.x_precalculation_p[x].rear_pixel;
auto x_front_weight = params.x_precalculation_p[x].front_weight;
auto x_rear_weight = params.x_precalculation_p[x].rear_weight;
auto base_upper_row = y_pixel_front * params.dimensions_info_p->source_image_width;
auto p_00_index = base_upper_row + x_pixel_front;
auto p_01_index = base_upper_row + x_pixel_rear;
auto* source_bytes = params.source_bytes_sequential_p;
png_byte pixel_p_value;
if (x_front_weight > x_rear_weight)
{
pixel_p_value = source_bytes[p_00_index];
}
else
{
pixel_p_value = source_bytes[p_01_index];
}
auto index_to_output_pixel = y * params.dimensions_info_p->result_image_width + x;
params.result_image_bytes_sequential_p[index_to_output_pixel] = pixel_p_value;
}
}
namespace global
{
__global__ void apply_nearest_neighbor(d_scale_params params, unsigned y_offset, unsigned y_max)
{
auto x = blockIdx.x * blockDim.x + threadIdx.x;
auto y = (blockIdx.y * blockDim.y + threadIdx.y) + y_offset;
if ((x >= params.dimensions_info_p->result_image_width) || (y >= y_max))
{
return;
}
auto y_pixel_front = params.y_precalculation_p[y].front_pixel;
auto x_pixel_front = params.x_precalculation_p[x].front_pixel;
auto x_pixel_rear = params.x_precalculation_p[x].rear_pixel;
auto x_front_weight = params.x_precalculation_p[x].front_weight;
auto x_rear_weight = params.x_precalculation_p[x].rear_weight;
auto base_upper_row = y_pixel_front * params.dimensions_info_p->source_image_width;
auto p_00_index = base_upper_row + x_pixel_front;
auto p_01_index = base_upper_row + x_pixel_rear;
auto* source_bytes = params.source_bytes_sequential_p;
png_byte pixel_p_value;
if (x_front_weight > x_rear_weight)
{
pixel_p_value = source_bytes[p_00_index];
}
else
{
pixel_p_value = source_bytes[p_01_index];
}
auto index_to_output_pixel = y * params.dimensions_info_p->result_image_width + x;
params.result_image_bytes_sequential_p[index_to_output_pixel] = pixel_p_value;
}
}
|
353eca61628cd9dbfc1716d8e087d566cc82b261.hip
|
// !!! This is a file automatically generated by hipify!!!
// CUDA Device Query
#include <stdio.h>
// Print device properties
void printDevProp(hipDeviceProp_t devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %u\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %u\n", devProp.totalConstMem);
printf("Texture alignment: %u\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
int main()
{
// Number of CUDA devices
int devCount;
hipGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
// Iterate through devices
for (int i = 0; i < devCount; ++i)
{
// Get device properties
printf("\nCUDA Device #%d\n", i);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, i);
printDevProp(devProp);
}
printf("\nPress any key to exit...");
char c;
scanf("%c", &c);
return 0;
}
|
353eca61628cd9dbfc1716d8e087d566cc82b261.cu
|
// CUDA Device Query
#include <stdio.h>
// Print device properties
void printDevProp(cudaDeviceProp devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %u\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %u\n", devProp.totalConstMem);
printf("Texture alignment: %u\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
int main()
{
// Number of CUDA devices
int devCount;
cudaGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
// Iterate through devices
for (int i = 0; i < devCount; ++i)
{
// Get device properties
printf("\nCUDA Device #%d\n", i);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
printDevProp(devProp);
}
printf("\nPress any key to exit...");
char c;
scanf("%c", &c);
return 0;
}
|
d8297d14bd626e6ec7e1c1230af2b7b5e672a0ca.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2015 Matthias Noack ([email protected])
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include "utils.hpp"
#include "kernels.hip"
long benchmark(
complex_t *sigma_in,
complex_t *sigma_out,
complex_t *hamiltonian,
size_t size_sigma,
size_t size_hamiltonian,
complex_t *sigma_reference,
complex_t *sigma_reference_transformed,
const int dim,
const int num, // global_work_size
const int kernel_id,
size_t vec_length,
decltype(&transform_matrices_aos_to_aosoa) transformation_sigma,
bool scale_hamiltonian,
decltype(&transform_matrix_aos_to_soa) transformation_hamiltonian)
{
initialise_hamiltonian(hamiltonian, dim);
if (scale_hamiltonian)
transform_matrix_scale_aos(hamiltonian, dim); // pre-scale hamiltonian
if (transformation_hamiltonian)
transformation_hamiltonian(hamiltonian, dim);
initialise_sigma(sigma_in, sigma_out, dim, num);
std::memcpy(sigma_reference_transformed, sigma_reference, size_sigma * sizeof(complex_t));
// transform memory layout if a transformation is specified
if (transformation_sigma) {
// transform reference for comparison
transformation_sigma(sigma_reference_transformed, dim, num, vec_length);
// transform sigma
transformation_sigma(sigma_in, dim, num, vec_length);
}
// extract the real and imag data
real_2_t* ham = allocate_aligned<real_2_t>(size_hamiltonian);
real_2_t* sin = allocate_aligned<real_2_t>(size_sigma);
real_2_t* sout = allocate_aligned<real_2_t>(size_sigma);
for (size_t i = 0; i < size_hamiltonian; i++) {
ham[i].x = hamiltonian[i].real();
ham[i].y = hamiltonian[i].imag();
}
for (size_t i = 0; i < size_sigma; i++) {
sin[i].x = sigma_in[i].real();
sin[i].y = sigma_in[i].imag();
}
for (size_t i = 0; i < size_sigma; i++) {
sout[i].x = sigma_out[i].real();
sout[i].y = sigma_out[i].imag();
}
// allocate device memory
real_2_t *d_hamiltonian;
real_2_t *d_sigma_in;
real_2_t *d_sigma_out;
hipMalloc((void**)&d_hamiltonian, sizeof(real_2_t) * size_hamiltonian);
hipMemcpy(d_hamiltonian, ham, sizeof(real_2_t) * size_hamiltonian,
hipMemcpyHostToDevice);
hipMalloc((void**)&d_sigma_in, sizeof(real_2_t) * size_sigma);
hipMemcpy(d_sigma_in, sin, sizeof(real_2_t) * size_sigma,
hipMemcpyHostToDevice);
hipMalloc((void**)&d_sigma_out, sizeof(real_2_t) * size_sigma);
long total_time = 0;
// benchmark loop
for (size_t i = 0; i < NUM_ITERATIONS; ++i) {
// clear output
hipMemcpy(d_sigma_out, sout, sizeof(real_2_t) * size_sigma,
hipMemcpyHostToDevice);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
// empty kernel
switch(kernel_id) {
case 0: {
dim3 k0_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k0_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
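// hipLaunchKernelGGL(kernel, gridDim, blockDim, sharedMemBytes, stream, args...) is HIP's
// kernel-launch macro; it corresponds to kernel<<<gridDim, blockDim, sharedMemBytes, stream>>>(args...) in CUDA.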
hipLaunchKernelGGL(( comm_empty), dim3(k0_gws), dim3(k0_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// initial kernel
case 1: {
dim3 k1_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k1_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
hipLaunchKernelGGL(( comm_init), dim3(k1_gws), dim3(k1_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// refactored initial kernel
case 2: {
dim3 k2_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k2_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
hipLaunchKernelGGL(( comm_refactor), dim3(k2_gws), dim3(k2_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// refactored initial kernel with direct store
case 3: {
dim3 k3_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k3_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
hipLaunchKernelGGL(( comm_refactor_direct_store), dim3(k3_gws), dim3(k3_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// vectorised kernel with 1D range
case 4: {
dim3 k4_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k4_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
hipLaunchKernelGGL(( comm_aosoa_naive), dim3(k4_gws), dim3(k4_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// vectorised kernel with 1D range and compile time constants
case 5: {
dim3 k5_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k5_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
hipLaunchKernelGGL(( comm_aosoa_naive_constants), dim3(k5_gws), dim3(k5_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// vectorised kernel with 1D range, compile time constants, and permuted loops with temporaries
case 6: {
dim3 k6_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k6_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
hipLaunchKernelGGL(( comm_aosoa_naive_constants_perm), dim3(k6_gws), dim3(k6_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// vectorised kernel with 1D range and direct store
case 7: {
dim3 k7_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k7_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
hipLaunchKernelGGL(( comm_aosoa_naive_direct), dim3(k7_gws), dim3(k7_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// vectorised kernel with 1D range, compile time constants, and direct store
case 8: {
dim3 k8_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k8_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
hipLaunchKernelGGL(( comm_aosoa_naive_constants_direct), dim3(k8_gws), dim3(k8_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// vectorised kernel with 1D range, compile time constants, direct store, and permuted loops with temporaries
case 9: {
dim3 k9_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k9_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
hipLaunchKernelGGL(( comm_aosoa_naive_constants_direct_perm), dim3(k9_gws), dim3(k9_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// vectorised kernel with 2D-range
case 10: {
dim3 k10_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG);
dim3 k10_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG);
hipLaunchKernelGGL(( comm_aosoa), dim3(k10_gws), dim3(k10_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// vectorised kernel with 2D-range and compile-time constants
case 11: {
dim3 k11_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG);
dim3 k11_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG);
hipLaunchKernelGGL(( comm_aosoa_constants), dim3(k11_gws), dim3(k11_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// vectorised kernel with 2D-range, compile-time constants, and permuted loops with temporaries
case 12: {
dim3 k12_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG);
dim3 k12_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG);
hipLaunchKernelGGL(( comm_aosoa_constants_perm), dim3(k12_gws), dim3(k12_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// vectorised kernel with 2D range and direct store
case 13: {
dim3 k13_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG);
dim3 k13_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG);
hipLaunchKernelGGL(( comm_aosoa_direct), dim3(k13_gws), dim3(k13_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// vectorised kernel with 2D range, compile-time constants, and direct store
case 14: {
dim3 k14_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG);
dim3 k14_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG);
hipLaunchKernelGGL(( comm_aosoa_constants_direct), dim3(k14_gws), dim3(k14_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// vectorised kernel with compile-time constants, direct store, and permuted loops with temporaries
case 15: {
dim3 k15_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG);
dim3 k15_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG);
hipLaunchKernelGGL(( comm_aosoa_constants_direct_perm), dim3(k15_gws), dim3(k15_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// manually vectorised kernel
case 16: {
dim3 k16_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k16_lws (VEC_LENGTH);
hipLaunchKernelGGL(( comm_manual_aosoa), dim3(k16_gws), dim3(k16_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// manually vectorised kernel with compile-time constants
case 17: {
dim3 k17_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k17_lws (VEC_LENGTH);
hipLaunchKernelGGL(( comm_manual_aosoa_constants), dim3(k17_gws), dim3(k17_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// manually vectorised kernel with compile-time constants and permuted loops with temporaries
case 18: {
dim3 k18_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k18_lws (VEC_LENGTH);
hipLaunchKernelGGL(( comm_manual_aosoa_constants_perm), dim3(k18_gws), dim3(k18_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// manually vectorised kernel with compile-time constants and prefetch
case 19: {
dim3 k19_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k19_lws (VEC_LENGTH);
hipLaunchKernelGGL(( comm_manual_aosoa_constants_perm_prefetch), dim3(k19_gws), dim3(k19_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// manually vectorised kernel with direct store
case 20: {
dim3 k20_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k20_lws (VEC_LENGTH);
hipLaunchKernelGGL(( comm_manual_aosoa_direct), dim3(k20_gws), dim3(k20_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// manually vectorised kernel with compile time constants and direct store
case 21: {
dim3 k21_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k21_lws (VEC_LENGTH);
hipLaunchKernelGGL(( comm_manual_aosoa_constants_direct), dim3(k21_gws), dim3(k21_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// manually vectorised kernel with compile time constants, direct store, and prefetch
case 22: {
dim3 k22_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k22_lws (VEC_LENGTH);
hipLaunchKernelGGL(( comm_manual_aosoa_constants_direct_prefetch), dim3(k22_gws), dim3(k22_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// manually vectorised kernel with compile time constants, direct store, and permuted loops with temporaries
case 23: {
dim3 k23_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k23_lws (VEC_LENGTH);
hipLaunchKernelGGL(( comm_manual_aosoa_constants_direct_perm), dim3(k23_gws), dim3(k23_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// final GPGPU kernel optimised for an Nvidia GPU
case 24: {
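// Round dim*dim up to the next multiple of WARP_SIZE (presumably so each dim x dim matrix is processed by whole warps).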
size_t block_dim_x = (dim * dim + WARP_SIZE - 1) / WARP_SIZE * WARP_SIZE;
size_t block_dim_y = NUM_SUB_GROUPS;
dim3 k24_gws (num / (block_dim_y * CHUNK_SIZE), 1);
dim3 k24_lws (block_dim_x, block_dim_y);
hipLaunchKernelGGL(( final_gpu_kernel), dim3(k24_gws), dim3(k24_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, num);
break;
}
default: std::cerr << "ERROR: **** benchmark kernel unavailable **** \n";
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
total_time += time;
}
std::cout << "Total execution time of kernel "
<< look_up(kernel_id) << " : " << total_time * 1e-9 << " (s)" << std::endl;
real_t deviation = 0;
if (kernel_id > 0) {
hipMemcpy(sout, d_sigma_out, sizeof(real_2_t) * size_sigma, hipMemcpyDeviceToHost);
for (size_t i = 0; i < size_sigma; i++) {
sigma_out[i] = {sout[i].x, sout[i].y};
}
// measure the differences between the CPU and GPU results
deviation = compare_matrices(sigma_out, sigma_reference_transformed, dim, num);
std::cout << "Deviation of kernel " << look_up(kernel_id) << ": " << deviation;
} else {
// the deviation of an empty kernel does not make sense
std::cout << "Deviation of kernel " << look_up(kernel_id) << "N/A";
}
std::cout << std::endl << std::endl;
hipFree(d_hamiltonian);
hipFree(d_sigma_in);
hipFree(d_sigma_out);
free(sin);
free(sout);
free(ham);
return total_time;
}
int main(int argc, char* argv[])
{
// debugging
print_compile_config(std::cout);
// constants
const size_t dim = DIM;
const size_t num = NUM;
// allocate host memory
size_t size_hamiltonian = dim * dim;
size_t size_sigma = size_hamiltonian * num;
size_t size_sigma_byte = sizeof(complex_t) * size_sigma;
complex_t* hamiltonian = allocate_aligned<complex_t>(size_hamiltonian);
complex_t* sigma_in = allocate_aligned<complex_t>(size_sigma);
complex_t* sigma_out = allocate_aligned<complex_t>(size_sigma);
complex_t* sigma_reference = allocate_aligned<complex_t>(size_sigma);
complex_t* sigma_reference_transformed = allocate_aligned<complex_t>(size_sigma);
// perform reference computation for correctness analysis
initialise_hamiltonian(hamiltonian, dim);
initialise_sigma(sigma_in, sigma_out, dim, num);
commutator_reference(sigma_in, sigma_out, hamiltonian, dim, num);
// copy reference results
std::memcpy(sigma_reference, sigma_out, size_sigma_byte);
// total kernel time for all benchmarks
long ktime = 0;
// The macro "BENCHMARK(...)" is defined in utils.hpp
ktime += BENCHMARK(0, VEC_LENGTH, NO_TRANSFORM, NO_SCALE_HAMILT, NO_TRANSFORM);
ktime += BENCHMARK(1, VEC_LENGTH, NO_TRANSFORM, NO_SCALE_HAMILT, NO_TRANSFORM);
ktime += BENCHMARK(2, VEC_LENGTH, NO_TRANSFORM, NO_SCALE_HAMILT, NO_TRANSFORM);
ktime += BENCHMARK(3, VEC_LENGTH, NO_TRANSFORM, SCALE_HAMILT, NO_TRANSFORM);
ktime += BENCHMARK(4, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(5, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(6, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(7, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(8, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(9, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(10, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(11, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(12, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(13, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(14, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(15, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(16, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(17, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(18, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(19, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(20, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(21, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(22, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(23, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(24, 2, NO_TRANSFORM, SCALE_HAMILT, NO_TRANSFORM);
printf("Total kernel time for all benchmarks %lf (s)\n", ktime * 1e-9);
free(hamiltonian);
free(sigma_in);
free(sigma_out);
free(sigma_reference);
free(sigma_reference_transformed);
return 0;
}
|
d8297d14bd626e6ec7e1c1230af2b7b5e672a0ca.cu
|
// Copyright (c) 2015 Matthias Noack ([email protected])
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include "utils.hpp"
#include "kernels.cu"
long benchmark(
complex_t *sigma_in,
complex_t *sigma_out,
complex_t *hamiltonian,
size_t size_sigma,
size_t size_hamiltonian,
complex_t *sigma_reference,
complex_t *sigma_reference_transformed,
const int dim,
const int num, // global_work_size
const int kernel_id,
size_t vec_length,
decltype(&transform_matrices_aos_to_aosoa) transformation_sigma,
bool scale_hamiltonian,
decltype(&transform_matrix_aos_to_soa) transformation_hamiltonian)
{
initialise_hamiltonian(hamiltonian, dim);
if (scale_hamiltonian)
transform_matrix_scale_aos(hamiltonian, dim); // pre-scale hamiltonian
if (transformation_hamiltonian)
transformation_hamiltonian(hamiltonian, dim);
initialise_sigma(sigma_in, sigma_out, dim, num);
std::memcpy(sigma_reference_transformed, sigma_reference, size_sigma * sizeof(complex_t));
// transform memory layout if a transformation is specified
if (transformation_sigma) {
// transform reference for comparison
transformation_sigma(sigma_reference_transformed, dim, num, vec_length);
// transform sigma
transformation_sigma(sigma_in, dim, num, vec_length);
}
// extract the real and imag data
real_2_t* ham = allocate_aligned<real_2_t>(size_hamiltonian);
real_2_t* sin = allocate_aligned<real_2_t>(size_sigma);
real_2_t* sout = allocate_aligned<real_2_t>(size_sigma);
for (size_t i = 0; i < size_hamiltonian; i++) {
ham[i].x = hamiltonian[i].real();
ham[i].y = hamiltonian[i].imag();
}
for (size_t i = 0; i < size_sigma; i++) {
sin[i].x = sigma_in[i].real();
sin[i].y = sigma_in[i].imag();
}
for (size_t i = 0; i < size_sigma; i++) {
sout[i].x = sigma_out[i].real();
sout[i].y = sigma_out[i].imag();
}
// allocate device memory
real_2_t *d_hamiltonian;
real_2_t *d_sigma_in;
real_2_t *d_sigma_out;
cudaMalloc((void**)&d_hamiltonian, sizeof(real_2_t) * size_hamiltonian);
cudaMemcpy(d_hamiltonian, ham, sizeof(real_2_t) * size_hamiltonian,
cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_sigma_in, sizeof(real_2_t) * size_sigma);
cudaMemcpy(d_sigma_in, sin, sizeof(real_2_t) * size_sigma,
cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_sigma_out, sizeof(real_2_t) * size_sigma);
long total_time = 0;
// benchmark loop
for (size_t i = 0; i < NUM_ITERATIONS; ++i) {
// clear output
cudaMemcpy(d_sigma_out, sout, sizeof(real_2_t) * size_sigma,
cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
// empty kernel
switch(kernel_id) {
case 0: {
dim3 k0_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k0_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
comm_empty<<<k0_gws, k0_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// initial kernel
case 1: {
dim3 k1_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k1_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
comm_init<<<k1_gws, k1_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// refactored initial kernel
case 2: {
dim3 k2_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k2_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
comm_refactor<<<k2_gws, k2_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// refactored initial kernel with direct store
case 3: {
dim3 k3_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k3_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
comm_refactor_direct_store<<<k3_gws, k3_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// vectorised kernel with 1D range
case 4: {
dim3 k4_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k4_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
comm_aosoa_naive<<<k4_gws, k4_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// vectorised kernel with 1D range and compile time constants
case 5: {
dim3 k5_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k5_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
comm_aosoa_naive_constants<<<k5_gws, k5_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// vectorised kernel with 1D range, compile time constants, and permuted loops with temporaries
case 6: {
dim3 k6_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k6_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
comm_aosoa_naive_constants_perm<<<k6_gws, k6_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// vectorised kernel with 1D range and direct store
case 7: {
dim3 k7_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k7_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
comm_aosoa_naive_direct<<<k7_gws, k7_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// vectorised kernel with 1D range, compile time constants, and direct store
case 8: {
dim3 k8_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k8_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
comm_aosoa_naive_constants_direct<<<k8_gws, k8_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// vectorised kernel with 1D range, compile time constants, direct store, and permuted loops with temporaries
case 9: {
dim3 k9_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k9_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
comm_aosoa_naive_constants_direct_perm<<<k9_gws, k9_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// vectorised kernel with 2D-range
case 10: {
dim3 k10_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG);
dim3 k10_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG);
comm_aosoa<<<k10_gws, k10_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// vectorised kernel with 2D-range and compile-time constants
case 11: {
dim3 k11_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG);
dim3 k11_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG);
comm_aosoa_constants<<<k11_gws, k11_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// vectorised kernel with 2D-range, compile-time constants, and permuted loops with temporaries
case 12: {
dim3 k12_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG);
dim3 k12_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG);
comm_aosoa_constants_perm<<<k12_gws, k12_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// vectorised kernel with 2D range and direct store
case 13: {
dim3 k13_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG);
dim3 k13_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG);
comm_aosoa_direct<<<k13_gws, k13_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// vectorised kernel with 2D range, compile-time constants, and direct store
case 14: {
dim3 k14_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG);
dim3 k14_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG);
comm_aosoa_constants_direct<<<k14_gws, k14_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// vectorised kernel with compile-time constants, direct store, and permuted loops with temporaries
case 15: {
dim3 k15_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG);
dim3 k15_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG);
comm_aosoa_constants_direct_perm<<<k15_gws, k15_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// manually vectorised kernel
case 16: {
dim3 k16_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k16_lws (VEC_LENGTH);
comm_manual_aosoa<<<k16_gws, k16_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// manually vectorised kernel with compile-time constants
case 17: {
dim3 k17_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k17_lws (VEC_LENGTH);
comm_manual_aosoa_constants<<<k17_gws, k17_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// manually vectorised kernel with compile-time constants and permuted loops with temporaries
case 18: {
dim3 k18_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k18_lws (VEC_LENGTH);
comm_manual_aosoa_constants_perm<<<k18_gws, k18_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// manually vectorised kernel with compile-time constants and prefetch
case 19: {
dim3 k19_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k19_lws (VEC_LENGTH);
comm_manual_aosoa_constants_perm_prefetch<<<k19_gws, k19_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// manually vectorised kernel with direct store
case 20: {
dim3 k20_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k20_lws (VEC_LENGTH);
comm_manual_aosoa_direct<<<k20_gws, k20_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// manually vectorised kernel with compile time constants and direct store
case 21: {
dim3 k21_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k21_lws (VEC_LENGTH);
comm_manual_aosoa_constants_direct<<<k21_gws, k21_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// manually vectorised kernel with compile time constants, direct store, and prefetch
case 22: {
dim3 k22_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k22_lws (VEC_LENGTH);
comm_manual_aosoa_constants_direct_prefetch<<<k22_gws, k22_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// manually vectorised kernel with compile time constants, direct store, and permuted loops with temporaries
case 23: {
dim3 k23_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k23_lws (VEC_LENGTH);
comm_manual_aosoa_constants_direct_perm<<<k23_gws, k23_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// final GPGPU kernel optimised for an Nvidia GPU
case 24: {
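// Round dim*dim up to the next multiple of WARP_SIZE (presumably so each dim x dim matrix is processed by whole warps).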
size_t block_dim_x = (dim * dim + WARP_SIZE - 1) / WARP_SIZE * WARP_SIZE;
size_t block_dim_y = NUM_SUB_GROUPS;
dim3 k24_gws (num / (block_dim_y * CHUNK_SIZE), 1);
dim3 k24_lws (block_dim_x, block_dim_y);
final_gpu_kernel<<<k24_gws, k24_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, num);
break;
}
default: std::cerr << "ERROR: **** benchmark kernel unavailable **** \n";
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
total_time += time;
}
std::cout << "Total execution time of kernel "
<< look_up(kernel_id) << " : " << total_time * 1e-9 << " (s)" << std::endl;
real_t deviation = 0;
if (kernel_id > 0) {
cudaMemcpy(sout, d_sigma_out, sizeof(real_2_t) * size_sigma, cudaMemcpyDeviceToHost);
for (size_t i = 0; i < size_sigma; i++) {
sigma_out[i] = {sout[i].x, sout[i].y};
}
// measure the differences between the CPU and GPU results
deviation = compare_matrices(sigma_out, sigma_reference_transformed, dim, num);
std::cout << "Deviation of kernel " << look_up(kernel_id) << ": " << deviation;
} else {
// the deviation of an empty kernel does not make sense
std::cout << "Deviation of kernel " << look_up(kernel_id) << "N/A";
}
std::cout << std::endl << std::endl;
cudaFree(d_hamiltonian);
cudaFree(d_sigma_in);
cudaFree(d_sigma_out);
free(sin);
free(sout);
free(ham);
return total_time;
}
int main(int argc, char* argv[])
{
// debugging
print_compile_config(std::cout);
// constants
const size_t dim = DIM;
const size_t num = NUM;
// allocate host memory
size_t size_hamiltonian = dim * dim;
size_t size_sigma = size_hamiltonian * num;
size_t size_sigma_byte = sizeof(complex_t) * size_sigma;
complex_t* hamiltonian = allocate_aligned<complex_t>(size_hamiltonian);
complex_t* sigma_in = allocate_aligned<complex_t>(size_sigma);
complex_t* sigma_out = allocate_aligned<complex_t>(size_sigma);
complex_t* sigma_reference = allocate_aligned<complex_t>(size_sigma);
complex_t* sigma_reference_transformed = allocate_aligned<complex_t>(size_sigma);
// perform reference computation for correctness analysis
initialise_hamiltonian(hamiltonian, dim);
initialise_sigma(sigma_in, sigma_out, dim, num);
commutator_reference(sigma_in, sigma_out, hamiltonian, dim, num);
// copy reference results
std::memcpy(sigma_reference, sigma_out, size_sigma_byte);
// total kernel time for all benchmarks
long ktime = 0;
// The macro "BENCHMARK(...)" is defined in utils.hpp
ktime += BENCHMARK(0, VEC_LENGTH, NO_TRANSFORM, NO_SCALE_HAMILT, NO_TRANSFORM);
ktime += BENCHMARK(1, VEC_LENGTH, NO_TRANSFORM, NO_SCALE_HAMILT, NO_TRANSFORM);
ktime += BENCHMARK(2, VEC_LENGTH, NO_TRANSFORM, NO_SCALE_HAMILT, NO_TRANSFORM);
ktime += BENCHMARK(3, VEC_LENGTH, NO_TRANSFORM, SCALE_HAMILT, NO_TRANSFORM);
ktime += BENCHMARK(4, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(5, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(6, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(7, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(8, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(9, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(10, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(11, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(12, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(13, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(14, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(15, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(16, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(17, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(18, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(19, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(20, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(21, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(22, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(23, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
ktime += BENCHMARK(24, 2, NO_TRANSFORM, SCALE_HAMILT, NO_TRANSFORM);
printf("Total kernel time for all benchmarks %lf (s)\n", ktime * 1e-9);
free(hamiltonian);
free(sigma_in);
free(sigma_out);
free(sigma_reference);
free(sigma_reference_transformed);
return 0;
}
|
20b4489efdbe0da17571b04dfe81d3f52a38260f.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifndef __HIPCC__
# error "A C or C++ compiler has been selected for CUDA"
#endif
/* Version number components: V=Version, R=Revision, P=Patch
Version date components: YYYY=Year, MM=Month, DD=Day */
#if defined(__NVCC__)
# define COMPILER_ID "NVIDIA"
# if defined(_MSC_VER)
# define SIMULATE_ID "MSVC"
# elif defined(__clang__)
# define SIMULATE_ID "Clang"
# elif defined(__GNUC__)
# define SIMULATE_ID "GNU"
# endif
# if defined(__CUDACC_VER_MAJOR__)
# define COMPILER_VERSION_MAJOR DEC(__CUDACC_VER_MAJOR__)
# define COMPILER_VERSION_MINOR DEC(__CUDACC_VER_MINOR__)
# define COMPILER_VERSION_PATCH DEC(__CUDACC_VER_BUILD__)
# endif
# if defined(_MSC_VER)
/* _MSC_VER = VVRR */
# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100)
# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100)
# elif defined(__clang__)
# define SIMULATE_VERSION_MAJOR DEC(__clang_major__)
# define SIMULATE_VERSION_MINOR DEC(__clang_minor__)
# elif defined(__GNUC__)
# define SIMULATE_VERSION_MAJOR DEC(__GNUC__)
# define SIMULATE_VERSION_MINOR DEC(__GNUC_MINOR__)
# endif
#elif defined(__clang__)
# define COMPILER_ID "Clang"
# if defined(_MSC_VER)
# define SIMULATE_ID "MSVC"
# endif
# define COMPILER_VERSION_MAJOR DEC(__clang_major__)
# define COMPILER_VERSION_MINOR DEC(__clang_minor__)
# define COMPILER_VERSION_PATCH DEC(__clang_patchlevel__)
# if defined(_MSC_VER)
/* _MSC_VER = VVRR */
# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100)
# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100)
# endif
/* These compilers are either not known or too old to define an
identification macro. Try to identify the platform and guess that
it is the native compiler. */
#elif defined(__hpux) || defined(__hpua)
# define COMPILER_ID "HP"
#else /* unknown compiler */
# define COMPILER_ID ""
#endif
/* Construct the string literal in pieces to prevent the source from
getting matched. Store it in a pointer rather than an array
because some compilers will just produce instructions to fill the
array rather than assigning a pointer to a static array. */
char const* info_compiler = "INFO" ":" "compiler[" COMPILER_ID "]";
#ifdef SIMULATE_ID
char const* info_simulate = "INFO" ":" "simulate[" SIMULATE_ID "]";
#endif
#define STRINGIFY_HELPER(X) #X
#define STRINGIFY(X) STRINGIFY_HELPER(X)
/* Identify known platforms by name. */
#if defined(__linux) || defined(__linux__) || defined(linux)
# define PLATFORM_ID "Linux"
#elif defined(__CYGWIN__)
# define PLATFORM_ID "Cygwin"
#elif defined(__MINGW32__)
# define PLATFORM_ID "MinGW"
#elif defined(__APPLE__)
# define PLATFORM_ID "Darwin"
#elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32)
# define PLATFORM_ID "Windows"
#elif defined(__FreeBSD__) || defined(__FreeBSD)
# define PLATFORM_ID "FreeBSD"
#elif defined(__NetBSD__) || defined(__NetBSD)
# define PLATFORM_ID "NetBSD"
#elif defined(__OpenBSD__) || defined(__OPENBSD)
# define PLATFORM_ID "OpenBSD"
#elif defined(__sun) || defined(sun)
# define PLATFORM_ID "SunOS"
#elif defined(_AIX) || defined(__AIX) || defined(__AIX__) || defined(__aix) || defined(__aix__)
# define PLATFORM_ID "AIX"
#elif defined(__hpux) || defined(__hpux__)
# define PLATFORM_ID "HP-UX"
#elif defined(__HAIKU__)
# define PLATFORM_ID "Haiku"
#elif defined(__BeOS) || defined(__BEOS__) || defined(_BEOS)
# define PLATFORM_ID "BeOS"
#elif defined(__QNX__) || defined(__QNXNTO__)
# define PLATFORM_ID "QNX"
#elif defined(__tru64) || defined(_tru64) || defined(__TRU64__)
# define PLATFORM_ID "Tru64"
#elif defined(__riscos) || defined(__riscos__)
# define PLATFORM_ID "RISCos"
#elif defined(__sinix) || defined(__sinix__) || defined(__SINIX__)
# define PLATFORM_ID "SINIX"
#elif defined(__UNIX_SV__)
# define PLATFORM_ID "UNIX_SV"
#elif defined(__bsdos__)
# define PLATFORM_ID "BSDOS"
#elif defined(_MPRAS) || defined(MPRAS)
# define PLATFORM_ID "MP-RAS"
#elif defined(__osf) || defined(__osf__)
# define PLATFORM_ID "OSF1"
#elif defined(_SCO_SV) || defined(SCO_SV) || defined(sco_sv)
# define PLATFORM_ID "SCO_SV"
#elif defined(__ultrix) || defined(__ultrix__) || defined(_ULTRIX)
# define PLATFORM_ID "ULTRIX"
#elif defined(__XENIX__) || defined(_XENIX) || defined(XENIX)
# define PLATFORM_ID "Xenix"
#elif defined(__WATCOMC__)
# if defined(__LINUX__)
# define PLATFORM_ID "Linux"
# elif defined(__DOS__)
# define PLATFORM_ID "DOS"
# elif defined(__OS2__)
# define PLATFORM_ID "OS2"
# elif defined(__WINDOWS__)
# define PLATFORM_ID "Windows3x"
# elif defined(__VXWORKS__)
# define PLATFORM_ID "VxWorks"
# else /* unknown platform */
# define PLATFORM_ID
# endif
#elif defined(__INTEGRITY)
# if defined(INT_178B)
# define PLATFORM_ID "Integrity178"
# else /* regular Integrity */
# define PLATFORM_ID "Integrity"
# endif
#else /* unknown platform */
# define PLATFORM_ID
#endif
/* For windows compilers MSVC and Intel we can determine
the architecture of the compiler being used. This is because
the compilers do not have flags that can change the architecture,
but rather depend on which compiler is being used
*/
#if defined(_WIN32) && defined(_MSC_VER)
# if defined(_M_IA64)
# define ARCHITECTURE_ID "IA64"
# elif defined(_M_X64) || defined(_M_AMD64)
# define ARCHITECTURE_ID "x64"
# elif defined(_M_IX86)
# define ARCHITECTURE_ID "X86"
# elif defined(_M_ARM64)
# define ARCHITECTURE_ID "ARM64"
# elif defined(_M_ARM)
# if _M_ARM == 4
# define ARCHITECTURE_ID "ARMV4I"
# elif _M_ARM == 5
# define ARCHITECTURE_ID "ARMV5I"
# else
# define ARCHITECTURE_ID "ARMV" STRINGIFY(_M_ARM)
# endif
# elif defined(_M_MIPS)
# define ARCHITECTURE_ID "MIPS"
# elif defined(_M_SH)
# define ARCHITECTURE_ID "SHx"
# else /* unknown architecture */
# define ARCHITECTURE_ID ""
# endif
#elif defined(__WATCOMC__)
# if defined(_M_I86)
# define ARCHITECTURE_ID "I86"
# elif defined(_M_IX86)
# define ARCHITECTURE_ID "X86"
# else /* unknown architecture */
# define ARCHITECTURE_ID ""
# endif
#elif defined(__IAR_SYSTEMS_ICC__) || defined(__IAR_SYSTEMS_ICC)
# if defined(__ICCARM__)
# define ARCHITECTURE_ID "ARM"
# elif defined(__ICCRX__)
# define ARCHITECTURE_ID "RX"
# elif defined(__ICCRH850__)
# define ARCHITECTURE_ID "RH850"
# elif defined(__ICCRL78__)
# define ARCHITECTURE_ID "RL78"
# elif defined(__ICCRISCV__)
# define ARCHITECTURE_ID "RISCV"
# elif defined(__ICCAVR__)
# define ARCHITECTURE_ID "AVR"
# elif defined(__ICC430__)
# define ARCHITECTURE_ID "MSP430"
# elif defined(__ICCV850__)
# define ARCHITECTURE_ID "V850"
# elif defined(__ICC8051__)
# define ARCHITECTURE_ID "8051"
# else /* unknown architecture */
# define ARCHITECTURE_ID ""
# endif
#elif defined(__ghs__)
# if defined(__PPC64__)
# define ARCHITECTURE_ID "PPC64"
# elif defined(__ppc__)
# define ARCHITECTURE_ID "PPC"
# elif defined(__ARM__)
# define ARCHITECTURE_ID "ARM"
# elif defined(__x86_64__)
# define ARCHITECTURE_ID "x64"
# elif defined(__i386__)
# define ARCHITECTURE_ID "X86"
# else /* unknown architecture */
# define ARCHITECTURE_ID ""
# endif
#elif defined(__TI_COMPILER_VERSION__)
# if defined(__TI_ARM__)
# define ARCHITECTURE_ID "ARM"
# elif defined(__MSP430__)
# define ARCHITECTURE_ID "MSP430"
# elif defined(__TMS320C28XX__)
# define ARCHITECTURE_ID "TMS320C28x"
# elif defined(__TMS320C6X__) || defined(_TMS320C6X)
# define ARCHITECTURE_ID "TMS320C6x"
# else /* unknown architecture */
# define ARCHITECTURE_ID ""
# endif
#else
# define ARCHITECTURE_ID
#endif
/* Convert integer to decimal digit literals. */
#define DEC(n) \
('0' + (((n) / 10000000)%10)), \
('0' + (((n) / 1000000)%10)), \
('0' + (((n) / 100000)%10)), \
('0' + (((n) / 10000)%10)), \
('0' + (((n) / 1000)%10)), \
('0' + (((n) / 100)%10)), \
('0' + (((n) / 10)%10)), \
('0' + ((n) % 10))
/* Convert integer to hex digit literals. */
#define HEX(n) \
('0' + ((n)>>28 & 0xF)), \
('0' + ((n)>>24 & 0xF)), \
('0' + ((n)>>20 & 0xF)), \
('0' + ((n)>>16 & 0xF)), \
('0' + ((n)>>12 & 0xF)), \
('0' + ((n)>>8 & 0xF)), \
('0' + ((n)>>4 & 0xF)), \
('0' + ((n) & 0xF))
/* Construct a string literal encoding the version number components. */
#ifdef COMPILER_VERSION_MAJOR
char const info_version[] = {
'I', 'N', 'F', 'O', ':',
'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','[',
COMPILER_VERSION_MAJOR,
# ifdef COMPILER_VERSION_MINOR
'.', COMPILER_VERSION_MINOR,
# ifdef COMPILER_VERSION_PATCH
'.', COMPILER_VERSION_PATCH,
# ifdef COMPILER_VERSION_TWEAK
'.', COMPILER_VERSION_TWEAK,
# endif
# endif
# endif
']','\0'};
#endif
/* Construct a string literal encoding the internal version number. */
#ifdef COMPILER_VERSION_INTERNAL
char const info_version_internal[] = {
'I', 'N', 'F', 'O', ':',
'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','_',
'i','n','t','e','r','n','a','l','[',
COMPILER_VERSION_INTERNAL,']','\0'};
#endif
/* Construct a string literal encoding the version number components. */
#ifdef SIMULATE_VERSION_MAJOR
char const info_simulate_version[] = {
'I', 'N', 'F', 'O', ':',
's','i','m','u','l','a','t','e','_','v','e','r','s','i','o','n','[',
SIMULATE_VERSION_MAJOR,
# ifdef SIMULATE_VERSION_MINOR
'.', SIMULATE_VERSION_MINOR,
# ifdef SIMULATE_VERSION_PATCH
'.', SIMULATE_VERSION_PATCH,
# ifdef SIMULATE_VERSION_TWEAK
'.', SIMULATE_VERSION_TWEAK,
# endif
# endif
# endif
']','\0'};
#endif
/* Construct the string literal in pieces to prevent the source from
getting matched. Store it in a pointer rather than an array
because some compilers will just produce instructions to fill the
array rather than assigning a pointer to a static array. */
char const* info_platform = "INFO" ":" "platform[" PLATFORM_ID "]";
char const* info_arch = "INFO" ":" "arch[" ARCHITECTURE_ID "]";
const char* info_language_dialect_default = "INFO" ":" "dialect_default["
#if __cplusplus > 201703L
"20"
#elif __cplusplus >= 201703L
"17"
#elif __cplusplus >= 201402L
"14"
#elif __cplusplus >= 201103L
"11"
#else
"03"
#endif
"]";
/*--------------------------------------------------------------------------*/
int main(int argc, char* argv[])
{
int require = 0;
require += info_compiler[argc];
require += info_platform[argc];
#ifdef COMPILER_VERSION_MAJOR
require += info_version[argc];
#endif
#ifdef SIMULATE_ID
require += info_simulate[argc];
#endif
#ifdef SIMULATE_VERSION_MAJOR
require += info_simulate_version[argc];
#endif
require += info_language_dialect_default[argc];
(void)argv;
return require;
}
|
20b4489efdbe0da17571b04dfe81d3f52a38260f.cu
|
#ifndef __CUDACC__
# error "A C or C++ compiler has been selected for CUDA"
#endif
/* Version number components: V=Version, R=Revision, P=Patch
Version date components: YYYY=Year, MM=Month, DD=Day */
#if defined(__NVCC__)
# define COMPILER_ID "NVIDIA"
# if defined(_MSC_VER)
# define SIMULATE_ID "MSVC"
# elif defined(__clang__)
# define SIMULATE_ID "Clang"
# elif defined(__GNUC__)
# define SIMULATE_ID "GNU"
# endif
# if defined(__CUDACC_VER_MAJOR__)
# define COMPILER_VERSION_MAJOR DEC(__CUDACC_VER_MAJOR__)
# define COMPILER_VERSION_MINOR DEC(__CUDACC_VER_MINOR__)
# define COMPILER_VERSION_PATCH DEC(__CUDACC_VER_BUILD__)
# endif
# if defined(_MSC_VER)
/* _MSC_VER = VVRR */
# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100)
# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100)
# elif defined(__clang__)
# define SIMULATE_VERSION_MAJOR DEC(__clang_major__)
# define SIMULATE_VERSION_MINOR DEC(__clang_minor__)
# elif defined(__GNUC__)
# define SIMULATE_VERSION_MAJOR DEC(__GNUC__)
# define SIMULATE_VERSION_MINOR DEC(__GNUC_MINOR__)
# endif
#elif defined(__clang__)
# define COMPILER_ID "Clang"
# if defined(_MSC_VER)
# define SIMULATE_ID "MSVC"
# endif
# define COMPILER_VERSION_MAJOR DEC(__clang_major__)
# define COMPILER_VERSION_MINOR DEC(__clang_minor__)
# define COMPILER_VERSION_PATCH DEC(__clang_patchlevel__)
# if defined(_MSC_VER)
/* _MSC_VER = VVRR */
# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100)
# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100)
# endif
/* These compilers are either not known or too old to define an
identification macro. Try to identify the platform and guess that
it is the native compiler. */
#elif defined(__hpux) || defined(__hpua)
# define COMPILER_ID "HP"
#else /* unknown compiler */
# define COMPILER_ID ""
#endif
/* Construct the string literal in pieces to prevent the source from
getting matched. Store it in a pointer rather than an array
because some compilers will just produce instructions to fill the
array rather than assigning a pointer to a static array. */
char const* info_compiler = "INFO" ":" "compiler[" COMPILER_ID "]";
#ifdef SIMULATE_ID
char const* info_simulate = "INFO" ":" "simulate[" SIMULATE_ID "]";
#endif
#define STRINGIFY_HELPER(X) #X
#define STRINGIFY(X) STRINGIFY_HELPER(X)
/* Identify known platforms by name. */
#if defined(__linux) || defined(__linux__) || defined(linux)
# define PLATFORM_ID "Linux"
#elif defined(__CYGWIN__)
# define PLATFORM_ID "Cygwin"
#elif defined(__MINGW32__)
# define PLATFORM_ID "MinGW"
#elif defined(__APPLE__)
# define PLATFORM_ID "Darwin"
#elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32)
# define PLATFORM_ID "Windows"
#elif defined(__FreeBSD__) || defined(__FreeBSD)
# define PLATFORM_ID "FreeBSD"
#elif defined(__NetBSD__) || defined(__NetBSD)
# define PLATFORM_ID "NetBSD"
#elif defined(__OpenBSD__) || defined(__OPENBSD)
# define PLATFORM_ID "OpenBSD"
#elif defined(__sun) || defined(sun)
# define PLATFORM_ID "SunOS"
#elif defined(_AIX) || defined(__AIX) || defined(__AIX__) || defined(__aix) || defined(__aix__)
# define PLATFORM_ID "AIX"
#elif defined(__hpux) || defined(__hpux__)
# define PLATFORM_ID "HP-UX"
#elif defined(__HAIKU__)
# define PLATFORM_ID "Haiku"
#elif defined(__BeOS) || defined(__BEOS__) || defined(_BEOS)
# define PLATFORM_ID "BeOS"
#elif defined(__QNX__) || defined(__QNXNTO__)
# define PLATFORM_ID "QNX"
#elif defined(__tru64) || defined(_tru64) || defined(__TRU64__)
# define PLATFORM_ID "Tru64"
#elif defined(__riscos) || defined(__riscos__)
# define PLATFORM_ID "RISCos"
#elif defined(__sinix) || defined(__sinix__) || defined(__SINIX__)
# define PLATFORM_ID "SINIX"
#elif defined(__UNIX_SV__)
# define PLATFORM_ID "UNIX_SV"
#elif defined(__bsdos__)
# define PLATFORM_ID "BSDOS"
#elif defined(_MPRAS) || defined(MPRAS)
# define PLATFORM_ID "MP-RAS"
#elif defined(__osf) || defined(__osf__)
# define PLATFORM_ID "OSF1"
#elif defined(_SCO_SV) || defined(SCO_SV) || defined(sco_sv)
# define PLATFORM_ID "SCO_SV"
#elif defined(__ultrix) || defined(__ultrix__) || defined(_ULTRIX)
# define PLATFORM_ID "ULTRIX"
#elif defined(__XENIX__) || defined(_XENIX) || defined(XENIX)
# define PLATFORM_ID "Xenix"
#elif defined(__WATCOMC__)
# if defined(__LINUX__)
# define PLATFORM_ID "Linux"
# elif defined(__DOS__)
# define PLATFORM_ID "DOS"
# elif defined(__OS2__)
# define PLATFORM_ID "OS2"
# elif defined(__WINDOWS__)
# define PLATFORM_ID "Windows3x"
# elif defined(__VXWORKS__)
# define PLATFORM_ID "VxWorks"
# else /* unknown platform */
# define PLATFORM_ID
# endif
#elif defined(__INTEGRITY)
# if defined(INT_178B)
# define PLATFORM_ID "Integrity178"
# else /* regular Integrity */
# define PLATFORM_ID "Integrity"
# endif
#else /* unknown platform */
# define PLATFORM_ID
#endif
/* For windows compilers MSVC and Intel we can determine
the architecture of the compiler being used. This is because
the compilers do not have flags that can change the architecture,
but rather depend on which compiler is being used
*/
#if defined(_WIN32) && defined(_MSC_VER)
# if defined(_M_IA64)
# define ARCHITECTURE_ID "IA64"
# elif defined(_M_X64) || defined(_M_AMD64)
# define ARCHITECTURE_ID "x64"
# elif defined(_M_IX86)
# define ARCHITECTURE_ID "X86"
# elif defined(_M_ARM64)
# define ARCHITECTURE_ID "ARM64"
# elif defined(_M_ARM)
# if _M_ARM == 4
# define ARCHITECTURE_ID "ARMV4I"
# elif _M_ARM == 5
# define ARCHITECTURE_ID "ARMV5I"
# else
# define ARCHITECTURE_ID "ARMV" STRINGIFY(_M_ARM)
# endif
# elif defined(_M_MIPS)
# define ARCHITECTURE_ID "MIPS"
# elif defined(_M_SH)
# define ARCHITECTURE_ID "SHx"
# else /* unknown architecture */
# define ARCHITECTURE_ID ""
# endif
#elif defined(__WATCOMC__)
# if defined(_M_I86)
# define ARCHITECTURE_ID "I86"
# elif defined(_M_IX86)
# define ARCHITECTURE_ID "X86"
# else /* unknown architecture */
# define ARCHITECTURE_ID ""
# endif
#elif defined(__IAR_SYSTEMS_ICC__) || defined(__IAR_SYSTEMS_ICC)
# if defined(__ICCARM__)
# define ARCHITECTURE_ID "ARM"
# elif defined(__ICCRX__)
# define ARCHITECTURE_ID "RX"
# elif defined(__ICCRH850__)
# define ARCHITECTURE_ID "RH850"
# elif defined(__ICCRL78__)
# define ARCHITECTURE_ID "RL78"
# elif defined(__ICCRISCV__)
# define ARCHITECTURE_ID "RISCV"
# elif defined(__ICCAVR__)
# define ARCHITECTURE_ID "AVR"
# elif defined(__ICC430__)
# define ARCHITECTURE_ID "MSP430"
# elif defined(__ICCV850__)
# define ARCHITECTURE_ID "V850"
# elif defined(__ICC8051__)
# define ARCHITECTURE_ID "8051"
# else /* unknown architecture */
# define ARCHITECTURE_ID ""
# endif
#elif defined(__ghs__)
# if defined(__PPC64__)
# define ARCHITECTURE_ID "PPC64"
# elif defined(__ppc__)
# define ARCHITECTURE_ID "PPC"
# elif defined(__ARM__)
# define ARCHITECTURE_ID "ARM"
# elif defined(__x86_64__)
# define ARCHITECTURE_ID "x64"
# elif defined(__i386__)
# define ARCHITECTURE_ID "X86"
# else /* unknown architecture */
# define ARCHITECTURE_ID ""
# endif
#elif defined(__TI_COMPILER_VERSION__)
# if defined(__TI_ARM__)
# define ARCHITECTURE_ID "ARM"
# elif defined(__MSP430__)
# define ARCHITECTURE_ID "MSP430"
# elif defined(__TMS320C28XX__)
# define ARCHITECTURE_ID "TMS320C28x"
# elif defined(__TMS320C6X__) || defined(_TMS320C6X)
# define ARCHITECTURE_ID "TMS320C6x"
# else /* unknown architecture */
# define ARCHITECTURE_ID ""
# endif
#else
# define ARCHITECTURE_ID
#endif
/* Convert integer to decimal digit literals. */
#define DEC(n) \
('0' + (((n) / 10000000)%10)), \
('0' + (((n) / 1000000)%10)), \
('0' + (((n) / 100000)%10)), \
('0' + (((n) / 10000)%10)), \
('0' + (((n) / 1000)%10)), \
('0' + (((n) / 100)%10)), \
('0' + (((n) / 10)%10)), \
('0' + ((n) % 10))
/* Convert integer to hex digit literals. */
#define HEX(n) \
('0' + ((n)>>28 & 0xF)), \
('0' + ((n)>>24 & 0xF)), \
('0' + ((n)>>20 & 0xF)), \
('0' + ((n)>>16 & 0xF)), \
('0' + ((n)>>12 & 0xF)), \
('0' + ((n)>>8 & 0xF)), \
('0' + ((n)>>4 & 0xF)), \
('0' + ((n) & 0xF))
/* Construct a string literal encoding the version number components. */
#ifdef COMPILER_VERSION_MAJOR
char const info_version[] = {
'I', 'N', 'F', 'O', ':',
'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','[',
COMPILER_VERSION_MAJOR,
# ifdef COMPILER_VERSION_MINOR
'.', COMPILER_VERSION_MINOR,
# ifdef COMPILER_VERSION_PATCH
'.', COMPILER_VERSION_PATCH,
# ifdef COMPILER_VERSION_TWEAK
'.', COMPILER_VERSION_TWEAK,
# endif
# endif
# endif
']','\0'};
#endif
/* Construct a string literal encoding the internal version number. */
#ifdef COMPILER_VERSION_INTERNAL
char const info_version_internal[] = {
'I', 'N', 'F', 'O', ':',
'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','_',
'i','n','t','e','r','n','a','l','[',
COMPILER_VERSION_INTERNAL,']','\0'};
#endif
/* Construct a string literal encoding the version number components. */
#ifdef SIMULATE_VERSION_MAJOR
char const info_simulate_version[] = {
'I', 'N', 'F', 'O', ':',
's','i','m','u','l','a','t','e','_','v','e','r','s','i','o','n','[',
SIMULATE_VERSION_MAJOR,
# ifdef SIMULATE_VERSION_MINOR
'.', SIMULATE_VERSION_MINOR,
# ifdef SIMULATE_VERSION_PATCH
'.', SIMULATE_VERSION_PATCH,
# ifdef SIMULATE_VERSION_TWEAK
'.', SIMULATE_VERSION_TWEAK,
# endif
# endif
# endif
']','\0'};
#endif
/* Construct the string literal in pieces to prevent the source from
getting matched. Store it in a pointer rather than an array
because some compilers will just produce instructions to fill the
array rather than assigning a pointer to a static array. */
char const* info_platform = "INFO" ":" "platform[" PLATFORM_ID "]";
char const* info_arch = "INFO" ":" "arch[" ARCHITECTURE_ID "]";
const char* info_language_dialect_default = "INFO" ":" "dialect_default["
#if __cplusplus > 201703L
"20"
#elif __cplusplus >= 201703L
"17"
#elif __cplusplus >= 201402L
"14"
#elif __cplusplus >= 201103L
"11"
#else
"03"
#endif
"]";
/*--------------------------------------------------------------------------*/
int main(int argc, char* argv[])
{
int require = 0;
require += info_compiler[argc];
require += info_platform[argc];
#ifdef COMPILER_VERSION_MAJOR
require += info_version[argc];
#endif
#ifdef SIMULATE_ID
require += info_simulate[argc];
#endif
#ifdef SIMULATE_VERSION_MAJOR
require += info_simulate_version[argc];
#endif
require += info_language_dialect_default[argc];
(void)argv;
return require;
}
|
a28acfbaea116a6c4c8f60b90f0f11c1c10727fd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../gtest.h"
#include <vector>
#include <backends/gpu/reduce_by_key.hpp>
#include <memory/memory.hpp>
#include <util/span.hpp>
#include <util/rangeutil.hpp>
using namespace arb;
template <typename T, typename I>
__global__
void reduce_kernel(const T* src, T* dst, const I* index, int n) {
unsigned tid = threadIdx.x + blockIdx.x*blockDim.x;
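// Build a bitmask of the warp lanes that hold valid work (tid < n), so the warp-level
// reduce_by_key below only involves the active lanes.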
unsigned mask = gpu::ballot(0xffffffff, tid<n);
if (tid<n) {
gpu::reduce_by_key(src[tid], dst, index[tid], mask);
}
}
template <typename T>
std::vector<T> reduce(const std::vector<T>& in, size_t n_out, const std::vector<int>& index, unsigned block_dim=128) {
EXPECT_EQ(in.size(), index.size());
EXPECT_TRUE(std::is_sorted(index.begin(), index.end()));
using array = memory::device_vector<T>;
using iarray = memory::device_vector<int>;
int n = in.size();
array src = memory::make_const_view(in);
iarray idx = memory::make_const_view(index);
array dst(n_out, 0);
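// Ceiling division: launch enough blocks of block_dim threads to cover all n inputs.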
unsigned grid_dim = (n-1)/block_dim + 1;
hipLaunchKernelGGL(( reduce_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, src.data(), dst.data(), idx.data(), n);
std::vector<T> out(n_out);
memory::copy(dst, memory::make_view(out));
return out;
}
TEST(reduce_by_key, no_repetitions)
{
int n = 64;
{
std::vector<float> in(n, 1);
std::vector<int> index = util::assign_from(util::make_span(0, n));
auto out = reduce(in, n, index);
for (auto o: out) EXPECT_EQ(o, 1.0f);
}
{
std::vector<double> in(n, 1);
std::vector<int> index = util::assign_from(util::make_span(0, n));
auto out = reduce(in, n, index);
for (auto o: out) EXPECT_EQ(o, 1.0);
}
}
TEST(reduce_by_key, single_repeated_index)
{
// Perform reduction of a sequence of 1s of length n
// The expected result is n
for (auto n: {1, 2, 7, 31, 32, 33, 63, 64, 65, 128}) {
std::vector<double> in(n, 1);
std::vector<int> index(n, 0);
auto out = reduce(in, 1, index, 32);
EXPECT_EQ(double(n), out[0]);
}
// Perform reduction of an ascending sequence of {1,2,3,...,n}
// The expected result is n*(n+1)/2
for (auto n: {1, 2, 7, 31, 32, 33, 63, 64, 65, 128}) {
std::vector<double> in = util::assign_from(util::make_span(1, n+1));
std::vector<int> index(n, 0);
auto out = reduce(in, 1, index);
EXPECT_EQ(out[0], double((n+1)*n/2));
}
}
TEST(reduce_by_key, scatter)
{
std::vector<int> index = {0,0,0,1,2,2,2,2,3,3,7,7,7,7,7,11};
unsigned n = util::max_value(index)+1;
std::vector<double> in(index.size(), 1);
std::vector<double> expected = {3., 1., 4., 2., 0., 0., 0., 5., 0., 0., 0., 1.};
EXPECT_EQ(n, expected.size());
auto out = reduce(in, n, index);
EXPECT_EQ(expected, out);
// rerun with 7 threads per thread block, to test
// * using more than one thread block
// * thread blocks that are not a multiple of 32
// * thread blocks that are less than 32
out = reduce(in, n, index, 7);
EXPECT_EQ(expected, out);
}
// Test kernels that perform more than one reduction in a single invocation.
// Used to reproduce and test for synchronization issues on V100 GPUs.
template <typename T, typename I>
__global__
void reduce_twice_kernel(const T* src, T* dst, const I* index, int n) {
unsigned tid = threadIdx.x + blockIdx.x*blockDim.x;
unsigned mask = gpu::ballot(0xffffffff, tid<n);
if (tid<n) {
gpu::reduce_by_key(src[tid], dst, index[tid], mask);
gpu::reduce_by_key(src[tid], dst, index[tid], mask);
}
}
template <typename T>
std::vector<T> reduce_twice(const std::vector<T>& in, size_t n_out, const std::vector<int>& index, unsigned block_dim=128) {
EXPECT_EQ(in.size(), index.size());
EXPECT_TRUE(std::is_sorted(index.begin(), index.end()));
using array = memory::device_vector<T>;
using iarray = memory::device_vector<int>;
int n = in.size();
array src = memory::make_const_view(in);
iarray idx = memory::make_const_view(index);
array dst(n_out, 0);
unsigned grid_dim = (n-1)/block_dim + 1;
hipLaunchKernelGGL(( reduce_twice_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, src.data(), dst.data(), idx.data(), n);
std::vector<T> out(n_out);
memory::copy(dst, memory::make_view(out));
return out;
}
TEST(reduce_by_key, scatter_twice)
{
std::vector<int> index = {0,0,0,1,2,2,3,7,7,7,11};
unsigned n = util::max_value(index)+1;
std::vector<double> in(index.size(), 1);
std::vector<double> expected = {6., 2., 4., 2., 0., 0., 0., 6., 0., 0., 0., 2.};
EXPECT_EQ(n, expected.size());
auto out = reduce_twice(in, n, index);
EXPECT_EQ(expected, out);
// rerun with 7 threads per thread block, to test
// * using more than one thread block
// * thread blocks that are not a multiple of 32
// * thread blocks that are less than 32
out = reduce_twice(in, n, index, 7);
EXPECT_EQ(expected, out);
}
|
a28acfbaea116a6c4c8f60b90f0f11c1c10727fd.cu
|
#include "../gtest.h"
#include <vector>
#include <backends/gpu/reduce_by_key.hpp>
#include <memory/memory.hpp>
#include <util/span.hpp>
#include <util/rangeutil.hpp>
using namespace arb;
template <typename T, typename I>
__global__
void reduce_kernel(const T* src, T* dst, const I* index, int n) {
unsigned tid = threadIdx.x + blockIdx.x*blockDim.x;
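// Build a bitmask of the warp lanes that hold valid work (tid < n), so the warp-level
// reduce_by_key below only involves the active lanes.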
unsigned mask = gpu::ballot(0xffffffff, tid<n);
if (tid<n) {
gpu::reduce_by_key(src[tid], dst, index[tid], mask);
}
}
template <typename T>
std::vector<T> reduce(const std::vector<T>& in, size_t n_out, const std::vector<int>& index, unsigned block_dim=128) {
EXPECT_EQ(in.size(), index.size());
EXPECT_TRUE(std::is_sorted(index.begin(), index.end()));
using array = memory::device_vector<T>;
using iarray = memory::device_vector<int>;
int n = in.size();
array src = memory::make_const_view(in);
iarray idx = memory::make_const_view(index);
array dst(n_out, 0);
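// Ceiling division: launch enough blocks of block_dim threads to cover all n inputs.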
unsigned grid_dim = (n-1)/block_dim + 1;
reduce_kernel<<<grid_dim, block_dim>>>(src.data(), dst.data(), idx.data(), n);
std::vector<T> out(n_out);
memory::copy(dst, memory::make_view(out));
return out;
}
TEST(reduce_by_key, no_repetitions)
{
int n = 64;
{
std::vector<float> in(n, 1);
std::vector<int> index = util::assign_from(util::make_span(0, n));
auto out = reduce(in, n, index);
for (auto o: out) EXPECT_EQ(o, 1.0f);
}
{
std::vector<double> in(n, 1);
std::vector<int> index = util::assign_from(util::make_span(0, n));
auto out = reduce(in, n, index);
for (auto o: out) EXPECT_EQ(o, 1.0);
}
}
TEST(reduce_by_key, single_repeated_index)
{
// Perform reduction of a sequence of 1s of length n
// The expected result is n
for (auto n: {1, 2, 7, 31, 32, 33, 63, 64, 65, 128}) {
std::vector<double> in(n, 1);
std::vector<int> index(n, 0);
auto out = reduce(in, 1, index, 32);
EXPECT_EQ(double(n), out[0]);
}
// Perform reduction of an ascending sequence of {1,2,3,...,n}
// The expected result is n*(n+1)/2
for (auto n: {1, 2, 7, 31, 32, 33, 63, 64, 65, 128}) {
std::vector<double> in = util::assign_from(util::make_span(1, n+1));
std::vector<int> index(n, 0);
auto out = reduce(in, 1, index);
EXPECT_EQ(out[0], double((n+1)*n/2));
}
}
TEST(reduce_by_key, scatter)
{
std::vector<int> index = {0,0,0,1,2,2,2,2,3,3,7,7,7,7,7,11};
unsigned n = util::max_value(index)+1;
std::vector<double> in(index.size(), 1);
std::vector<double> expected = {3., 1., 4., 2., 0., 0., 0., 5., 0., 0., 0., 1.};
EXPECT_EQ(n, expected.size());
auto out = reduce(in, n, index);
EXPECT_EQ(expected, out);
// rerun with 7 threads per thread block, to test
// * using more than one thread block
    // * block sizes that are not a multiple of the warp size (32)
    // * block sizes smaller than a warp
out = reduce(in, n, index, 7);
EXPECT_EQ(expected, out);
}
// Test kernels that perform more than one reduction in a single invocation.
// Used to reproduce and test for synchronization issues on V100 GPUs.
template <typename T, typename I>
__global__
void reduce_twice_kernel(const T* src, T* dst, const I* index, int n) {
unsigned tid = threadIdx.x + blockIdx.x*blockDim.x;
unsigned mask = gpu::ballot(0xffffffff, tid<n);
if (tid<n) {
gpu::reduce_by_key(src[tid], dst, index[tid], mask);
gpu::reduce_by_key(src[tid], dst, index[tid], mask);
}
}
template <typename T>
std::vector<T> reduce_twice(const std::vector<T>& in, size_t n_out, const std::vector<int>& index, unsigned block_dim=128) {
EXPECT_EQ(in.size(), index.size());
EXPECT_TRUE(std::is_sorted(index.begin(), index.end()));
using array = memory::device_vector<T>;
using iarray = memory::device_vector<int>;
int n = in.size();
array src = memory::make_const_view(in);
iarray idx = memory::make_const_view(index);
array dst(n_out, 0);
unsigned grid_dim = (n-1)/block_dim + 1;
reduce_twice_kernel<<<grid_dim, block_dim>>>(src.data(), dst.data(), idx.data(), n);
std::vector<T> out(n_out);
memory::copy(dst, memory::make_view(out));
return out;
}
TEST(reduce_by_key, scatter_twice)
{
std::vector<int> index = {0,0,0,1,2,2,3,7,7,7,11};
unsigned n = util::max_value(index)+1;
std::vector<double> in(index.size(), 1);
std::vector<double> expected = {6., 2., 4., 2., 0., 0., 0., 6., 0., 0., 0., 2.};
EXPECT_EQ(n, expected.size());
auto out = reduce_twice(in, n, index);
EXPECT_EQ(expected, out);
// rerun with 7 threads per thread block, to test
// * using more than one thread block
    // * block sizes that are not a multiple of the warp size (32)
    // * block sizes smaller than a warp
out = reduce_twice(in, n, index, 7);
EXPECT_EQ(expected, out);
}
|
c06dbc36c5329883760c6dcefbecd6314af7c802.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "KCInitRNGStates.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const uint32_t *gSeeds = NULL;
hipMalloc(&gSeeds, XSIZE*YSIZE);
hiprandStateMRG32k3a_t *gStates = NULL;
hipMalloc(&gStates, XSIZE*YSIZE);
size_t totalCount = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(KCInitRNGStates, dim3(gridBlock), dim3(threadBlock), 0, 0, gSeeds, gStates, totalCount);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(KCInitRNGStates, dim3(gridBlock), dim3(threadBlock), 0, 0, gSeeds, gStates, totalCount);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(KCInitRNGStates, dim3(gridBlock), dim3(threadBlock), 0, 0, gSeeds, gStates, totalCount);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
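// Illustrative sketch (assumption, not part of the generated benchmark above):
// kernel launches are asynchronous, so the steady_clock loop times the host-side
// enqueuing of the launches and kernels may still be running when `end` is read.
// A helper based on hipEvent_t, which synchronizes on the stop event, times the
// work on the device instead. The name timeKernelMs is hypothetical.
template <typename Launch>
float timeKernelMs(Launch launch, int iterations) {
    hipEvent_t start_evt, stop_evt;
    hipEventCreate(&start_evt);
    hipEventCreate(&stop_evt);
    hipEventRecord(start_evt, 0);
    for (int i = 0; i < iterations; ++i) launch(); // e.g. a lambda wrapping hipLaunchKernelGGL
    hipEventRecord(stop_evt, 0);
    hipEventSynchronize(stop_evt);
    float elapsed_ms = 0.0f;
    hipEventElapsedTime(&elapsed_ms, start_evt, stop_evt);
    hipEventDestroy(start_evt);
    hipEventDestroy(stop_evt);
    return elapsed_ms;
}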
|
c06dbc36c5329883760c6dcefbecd6314af7c802.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "KCInitRNGStates.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const uint32_t *gSeeds = NULL;
cudaMalloc(&gSeeds, XSIZE*YSIZE);
curandStateMRG32k3a_t *gStates = NULL;
cudaMalloc(&gStates, XSIZE*YSIZE);
size_t totalCount = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
KCInitRNGStates<<<gridBlock,threadBlock>>>(gSeeds,gStates,totalCount);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
KCInitRNGStates<<<gridBlock,threadBlock>>>(gSeeds,gStates,totalCount);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
KCInitRNGStates<<<gridBlock,threadBlock>>>(gSeeds,gStates,totalCount);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
5bbf05c04382f4d72f0bfbf5f0876c774b77d06d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <list>
#include <algorithm>
#include <iostream>
#include <string>
#include <cmath>
#include <vector>
#include "rocblas.h"
#include <reduce_min_kernel.h>
using namespace std;
void Tokenize(string& str, vector<string>& tokens, const string& delimiters)
{
string::size_type lastPos = str.find_first_not_of(delimiters, 0);
string::size_type pos = str.find_first_of(delimiters, lastPos);
while (string::npos != pos || string::npos != lastPos)
{
tokens.push_back(str.substr(lastPos, pos - lastPos));
lastPos = str.find_first_not_of(delimiters, pos);
pos = str.find_first_of(delimiters, lastPos);
}
}
__global__ void UpdateH(int j1, int j2,float * k1_device, float * k2_device,float C, float * h_device,int * y_device, int Ntotal)
{
int t=threadIdx.x;
int tt = gridDim.x*blockDim.x;
int ctaStart = blockDim.x*blockIdx.x;
int y_device_j1=y_device[j1];
int y_device_j2=y_device[j2];
for(int i=ctaStart+t;i<Ntotal;i+=tt)
{
if(i==j1)
{
h_device[i]+=y_device[i]*y_device_j1*(k1_device[i]+1.0f/C)+y_device[i]*y_device_j2*k2_device[i];
}
else if(i==j2)
{
h_device[i]+=y_device[i]*y_device_j1*k1_device[i]+y_device[i]*y_device_j2*(k2_device[i]+1.0f/C);
}
else
{
h_device[i]+=y_device[i]*y_device_j1*k1_device[i]+y_device[i]*y_device_j2*k2_device[i];
}
}
}
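/*
 RBFKernel (below) computes one row of the Gaussian kernel matrix for sample
 which_wanted: hipblasSgemv forms the dot products x_i . x_w for every sample i,
 and FinishRBFKernel turns each one into
   exp(-bandwidth*(||x_w||^2 + ||x_i||^2 - 2*x_i.x_w))
 using the squared norms precomputed in self_dot_device. Note that the launch
 inside RBFKernel passes the variable named dimBlock in the grid position and
 dimGrid in the block position of the launch configuration.
*/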
__global__ void FinishRBFKernel(int which_wanted, float * self_dot_device, int Ntotal,int m, float *cb_dot, float bandwidth)
{
int t=threadIdx.x;
int tt = gridDim.x*blockDim.x;
int ctaStart = blockDim.x*blockIdx.x;
float save=self_dot_device[which_wanted];
for(int i=ctaStart+t;i<Ntotal;i+=tt)
{
cb_dot[i]=expf(-bandwidth*(save+self_dot_device[i]-2*cb_dot[i]));
}
}
void RBFKernel(int which_wanted,float * x_device, float * self_dot_device, int Ntotal,int m,float *b_device, float bandwidth, int BANDS)
{
hipblasSgemv('t',m,Ntotal,1.0,x_device,m,&x_device[which_wanted*m],1,0.0,b_device,1);
int gridsize=Ntotal/BANDS;
dim3 dimBlock(BANDS, 1);
dim3 dimGrid(gridsize,1);
hipLaunchKernelGGL(( FinishRBFKernel), dim3(dimBlock),dim3(dimGrid), 0, 0, which_wanted,self_dot_device,Ntotal,m,b_device,bandwidth);
}
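/*
 Illustrative sketch (assumption, not wired into the program): the
 "move this to CUDA" loop in main() fills self_dot[i] with the squared norm of
 sample i; one thread per sample would do the same on the device. The kernel
 name SelfDotKernel is hypothetical.
*/
__global__ void SelfDotKernel(const float * x, float * self_dot, int Ntotal, int m)
{
	int i=blockDim.x*blockIdx.x+threadIdx.x;
	if(i<Ntotal)
	{
		float acc=0.0f;
		for(int j=0;j<m;j++)
		{
			acc+=x[i*m+j]*x[i*m+j];
		}
		self_dot[i]=acc;
	}
}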
int main(int argc, char *argv[])
{
float bandwidth;
int ITER_MAX;
int BANDS;
float C;
if(argc==5)
{
ITER_MAX=atoi(argv[1]);
BANDS=atoi(argv[2]);
C=atof(argv[3]);
bandwidth=atof(argv[4]);
}
else
{
cout<<"Usage: Include the max iteration number and block size"<<endl;
return EXIT_FAILURE;
}
//START INPUT -----------------------------------------------------------
string s;
int Npos,Nneg,m;
vector<string> t;
getline(cin, s);
Tokenize(s,t,",");
if(t.size()==3)
{
Nneg=atoi(t[0].c_str());
Npos=atoi(t[1].c_str());
m=atoi(t[2].c_str());
}
float *xpos = new float[Npos*m];
float *xneg = new float[Nneg*m];
int cpos=0;
int cneg=0;
while(getline(cin, s))
{
vector<string> t;
Tokenize(s,t," ");
int y=atoi(t[0].c_str());
for(int i=1;i<t.size();i++)
{
vector<string> t2;
Tokenize(t[i],t2,":");
if(y==1)
{
xpos[cpos*m+atoi(t2[0].c_str())-1]=atof(t2[1].c_str());
}
else
{
xneg[cneg*m+atoi(t2[0].c_str())-1]=atof(t2[1].c_str());
}
}
if(y==1)
{
cpos++;
}
else
{
cneg++;
}
}
int Ntotal=Nneg+Npos;
int * y=new int[Ntotal];
float * h=new float[Ntotal];
float*x=new float[(Npos+Nneg)*m];
for(int i=0;i<Nneg;i++)
{
h[i]=0;
y[i]=-1;
for(int j=0;j<m;j++)
{
x[i*m+j]=xneg[i*m+j];
}
}
for(int i=Nneg;i<Nneg+Npos;i++)
{
h[i]=0;
y[i]=1;
for(int j=0;j<m;j++)
{
x[i*m+j]=xpos[(i-Nneg)*m+j];
}
}
//END INPUT ---------------------------------------------------------------
hipblasInit();
int * nu=new int[Ntotal];
float * self_dot=new float[Ntotal];
//move this to CUDA
for(int i=0;i<Ntotal;i++)
{
self_dot[i]=0;
nu[i]=0;
for(int j=0;j<m;j++)
{
self_dot[i]+=x[i*m+j]*x[i*m+j];
}
}
float * x_device;
hipblasAlloc(Ntotal*m,sizeof(float),(void**)&x_device);
hipblasSetMatrix(m,Ntotal,sizeof(float),x,m,x_device,m);
int * y_device;
hipblasAlloc(Ntotal, sizeof(int), (void**)&y_device);
hipblasSetVector(Ntotal,sizeof(int),y,1,y_device,1);
float * self_dot_device;
hipblasAlloc(Ntotal, sizeof(float), (void**)&self_dot_device);
hipblasSetVector(Ntotal,sizeof(float),self_dot,1,self_dot_device,1);
float * h_device;
hipblasAlloc(Ntotal, sizeof(float), (void**)&h_device);
hipblasSetVector(Ntotal,sizeof(float),h,1,h_device,1);
float * k1_device,*k2_device;
hipblasAlloc(Ntotal,sizeof(float),(void**)&k1_device);
hipblasAlloc(Ntotal,sizeof(float),(void**)&k2_device);
float *k1=new float[Ntotal];
float *k2=new float[Ntotal];
for(int iter=0;iter<ITER_MAX;iter++)
{
//find min on the two halves of h
int h_neg_min_index=IndexOfMin<float>(h_device,Nneg);
int h_pos_min_index=IndexOfMin<float>(&h_device[Nneg],Npos);
//increase nu for the mins
nu[h_neg_min_index]++;
nu[Nneg+h_pos_min_index]++;
//incrementally maintain the h vector
RBFKernel(h_neg_min_index,x_device, self_dot_device, Ntotal,m,k1_device,bandwidth,BANDS);
RBFKernel(Nneg+h_pos_min_index,x_device, self_dot_device, Ntotal,m,k2_device,bandwidth,BANDS);
int gridsize=Ntotal/BANDS;
dim3 dimBlock(BANDS, 1);
dim3 dimGrid(gridsize,1);
hipblasGetVector(Ntotal,sizeof(float),h_device,1,h,1);
//cout<<h_neg_min_index<<" "<<h_pos_min_index+Nneg<<endl;hipLaunchKernelGGL((
UpdateH), dim3(dimBlock),dim3(dimGrid), 0, 0, h_neg_min_index,Nneg+h_pos_min_index,k1_device,k2_device,C,h_device,y_device,Ntotal);
}
hipblasGetVector(Ntotal,sizeof(float),h_device,1,h,1);
int h_neg_min_index=IndexOfMin<float>(h_device,Nneg);
int h_pos_min_index=IndexOfMin<float>(&h_device[Nneg],Npos);
//int h_neg_min_index=hipblasIsamin(Nneg,h_device,1)-1;
//int h_pos_min_index=hipblasIsamin(Npos,&h_device[Nneg],1)-1;
//hipblasIsamin (int n, const float *x, int incx)
float b=.5*(h[h_pos_min_index+Nneg]-h[h_neg_min_index]);
cout<<Ntotal<<","<<m<<","<<b<<","<<bandwidth<<endl;
for(int i=0;i<Ntotal;i++)
{
cout<<y[i]<<",";
cout<<nu[i];
for(int j=0;j<m-1;j++)
{
if(x[i*m+j]!=0)
{
cout<<" "<<j+1<<":"<<x[i*m+j];
}
}
cout<<endl;
}
return 0;
}
|
5bbf05c04382f4d72f0bfbf5f0876c774b77d06d.cu
|
#include <list>
#include <algorithm>
#include <iostream>
#include <string>
#include <cmath>
#include <vector>
#include "cublas.h"
#include <reduce_min_kernel.h>
using namespace std;
void Tokenize(string& str, vector<string>& tokens, const string& delimiters)
{
string::size_type lastPos = str.find_first_not_of(delimiters, 0);
string::size_type pos = str.find_first_of(delimiters, lastPos);
while (string::npos != pos || string::npos != lastPos)
{
tokens.push_back(str.substr(lastPos, pos - lastPos));
lastPos = str.find_first_not_of(delimiters, pos);
pos = str.find_first_of(delimiters, lastPos);
}
}
__global__ void UpdateH(int j1, int j2,float * k1_device, float * k2_device,float C, float * h_device,int * y_device, int Ntotal)
{
int t=threadIdx.x;
int tt = gridDim.x*blockDim.x;
int ctaStart = blockDim.x*blockIdx.x;
int y_device_j1=y_device[j1];
int y_device_j2=y_device[j2];
for(int i=ctaStart+t;i<Ntotal;i+=tt)
{
if(i==j1)
{
h_device[i]+=y_device[i]*y_device_j1*(k1_device[i]+1.0f/C)+y_device[i]*y_device_j2*k2_device[i];
}
else if(i==j2)
{
h_device[i]+=y_device[i]*y_device_j1*k1_device[i]+y_device[i]*y_device_j2*(k2_device[i]+1.0f/C);
}
else
{
h_device[i]+=y_device[i]*y_device_j1*k1_device[i]+y_device[i]*y_device_j2*k2_device[i];
}
}
}
__global__ void FinishRBFKernel(int which_wanted, float * self_dot_device, int Ntotal,int m, float *cb_dot, float bandwidth)
{
int t=threadIdx.x;
int tt = gridDim.x*blockDim.x;
int ctaStart = blockDim.x*blockIdx.x;
float save=self_dot_device[which_wanted];
for(int i=ctaStart+t;i<Ntotal;i+=tt)
{
cb_dot[i]=expf(-bandwidth*(save+self_dot_device[i]-2*cb_dot[i]));
}
}
void RBFKernel(int which_wanted,float * x_device, float * self_dot_device, int Ntotal,int m,float *b_device, float bandwidth, int BANDS)
{
cublasSgemv('t',m,Ntotal,1.0,x_device,m,&x_device[which_wanted*m],1,0.0,b_device,1);
int gridsize=Ntotal/BANDS;
dim3 dimBlock(BANDS, 1);
dim3 dimGrid(gridsize,1);
FinishRBFKernel<<<dimBlock,dimGrid>>>(which_wanted,self_dot_device,Ntotal,m,b_device,bandwidth);
}
int main(int argc, char *argv[])
{
float bandwidth;
int ITER_MAX;
int BANDS;
float C;
if(argc==5)
{
ITER_MAX=atoi(argv[1]);
BANDS=atoi(argv[2]);
C=atof(argv[3]);
bandwidth=atof(argv[4]);
}
else
{
cout<<"Usage: Include the max iteration number and block size"<<endl;
return EXIT_FAILURE;
}
//START INPUT -----------------------------------------------------------
string s;
int Npos,Nneg,m;
vector<string> t;
getline(cin, s);
Tokenize(s,t,",");
if(t.size()==3)
{
Nneg=atoi(t[0].c_str());
Npos=atoi(t[1].c_str());
m=atoi(t[2].c_str());
}
float *xpos = new float[Npos*m];
float *xneg = new float[Nneg*m];
int cpos=0;
int cneg=0;
while(getline(cin, s))
{
vector<string> t;
Tokenize(s,t," ");
int y=atoi(t[0].c_str());
for(int i=1;i<t.size();i++)
{
vector<string> t2;
Tokenize(t[i],t2,":");
if(y==1)
{
xpos[cpos*m+atoi(t2[0].c_str())-1]=atof(t2[1].c_str());
}
else
{
xneg[cneg*m+atoi(t2[0].c_str())-1]=atof(t2[1].c_str());
}
}
if(y==1)
{
cpos++;
}
else
{
cneg++;
}
}
int Ntotal=Nneg+Npos;
int * y=new int[Ntotal];
float * h=new float[Ntotal];
float*x=new float[(Npos+Nneg)*m];
for(int i=0;i<Nneg;i++)
{
h[i]=0;
y[i]=-1;
for(int j=0;j<m;j++)
{
x[i*m+j]=xneg[i*m+j];
}
}
for(int i=Nneg;i<Nneg+Npos;i++)
{
h[i]=0;
y[i]=1;
for(int j=0;j<m;j++)
{
x[i*m+j]=xpos[(i-Nneg)*m+j];
}
}
//END INPUT ---------------------------------------------------------------
cublasInit();
int * nu=new int[Ntotal];
float * self_dot=new float[Ntotal];
//move this to CUDA
for(int i=0;i<Ntotal;i++)
{
self_dot[i]=0;
nu[i]=0;
for(int j=0;j<m;j++)
{
self_dot[i]+=x[i*m+j]*x[i*m+j];
}
}
float * x_device;
cublasAlloc(Ntotal*m,sizeof(float),(void**)&x_device);
cublasSetMatrix(m,Ntotal,sizeof(float),x,m,x_device,m);
int * y_device;
cublasAlloc(Ntotal, sizeof(int), (void**)&y_device);
cublasSetVector(Ntotal,sizeof(int),y,1,y_device,1);
float * self_dot_device;
cublasAlloc(Ntotal, sizeof(float), (void**)&self_dot_device);
cublasSetVector(Ntotal,sizeof(float),self_dot,1,self_dot_device,1);
float * h_device;
cublasAlloc(Ntotal, sizeof(float), (void**)&h_device);
cublasSetVector(Ntotal,sizeof(float),h,1,h_device,1);
float * k1_device,*k2_device;
cublasAlloc(Ntotal,sizeof(float),(void**)&k1_device);
cublasAlloc(Ntotal,sizeof(float),(void**)&k2_device);
float *k1=new float[Ntotal];
float *k2=new float[Ntotal];
for(int iter=0;iter<ITER_MAX;iter++)
{
//find min on the two halves of h
int h_neg_min_index=IndexOfMin<float>(h_device,Nneg);
int h_pos_min_index=IndexOfMin<float>(&h_device[Nneg],Npos);
//increase nu for the mins
nu[h_neg_min_index]++;
nu[Nneg+h_pos_min_index]++;
//incrementally maintain the h vector
RBFKernel(h_neg_min_index,x_device, self_dot_device, Ntotal,m,k1_device,bandwidth,BANDS);
RBFKernel(Nneg+h_pos_min_index,x_device, self_dot_device, Ntotal,m,k2_device,bandwidth,BANDS);
int gridsize=Ntotal/BANDS;
dim3 dimBlock(BANDS, 1);
dim3 dimGrid(gridsize,1);
cublasGetVector(Ntotal,sizeof(float),h_device,1,h,1);
//cout<<h_neg_min_index<<" "<<h_pos_min_index+Nneg<<endl;
UpdateH<<<dimBlock,dimGrid>>>(h_neg_min_index,Nneg+h_pos_min_index,k1_device,k2_device,C,h_device,y_device,Ntotal);
}
cublasGetVector(Ntotal,sizeof(float),h_device,1,h,1);
int h_neg_min_index=IndexOfMin<float>(h_device,Nneg);
int h_pos_min_index=IndexOfMin<float>(&h_device[Nneg],Npos);
//int h_neg_min_index=cublasIsamin(Nneg,h_device,1)-1;
//int h_pos_min_index=cublasIsamin(Npos,&h_device[Nneg],1)-1;
//cublasIsamin (int n, const float *x, int incx)
float b=.5*(h[h_pos_min_index+Nneg]-h[h_neg_min_index]);
cout<<Ntotal<<","<<m<<","<<b<<","<<bandwidth<<endl;
for(int i=0;i<Ntotal;i++)
{
cout<<y[i]<<",";
cout<<nu[i];
for(int j=0;j<m-1;j++)
{
if(x[i*m+j]!=0)
{
cout<<" "<<j+1<<":"<<x[i*m+j];
}
}
cout<<endl;
}
return 0;
}
|
3370691bebb3f49216a8d2dd02deb852489c6566.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/****
*
* GPU accelerated Monte Carlo simulation of the 2D Ising model
*
* Copyright (C) 2008 Tobias Preis (http://www.tobiaspreis.de)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, see
* http://www.gnu.org/licenses/.
*
* Related publication:
*
* T. Preis, P. Virnau, W. Paul, and J. J. Schneider,
* Journal of Computational Physics 228, 4468-4477 (2009)
* doi:10.1016/j.jcp.2009.03.018
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include "cuda_utils.h"
//#include <helper_cuda.h>
#define FLAG_PRINT_SPINS 0
#define FLAG_ENERGY 0
#define T_START 3.00
#define T_FACTOR 0.9
#define T_END 2.00
#define GLOBAL_ITERATIONS 100
#define RANDOM_A 1664525
#define RANDOM_B 1013904223
#define BLOCK_SIZE 512
const unsigned int N=4*BLOCK_SIZE*BLOCK_SIZE;
const unsigned int n=2*BLOCK_SIZE;
#include "rob.cu"
/****
*
* Function declaration
*
*/
void calc(int argc,char** argv);
void cpu_function(double*,int*);
void cpu_function_noreduce(double*,int*);
__global__ void device_function_main(int*,int*,int*,float,bool);
__global__ void device_function_main_noreduce(int*,int*,float,bool);
/****
*
* Main function
*
*/
int main(int argc,char** argv) {
calc(argc,argv);
}
/****
*
* Calc
*
*/
void calc(int argc,char** argv) {
printf("----------------------------------------------------------------------- \n");
printf(" *\n");
printf(" * GPU accelerated Monte Carlo simulation of the 2D Ising model\n");
printf(" *\n");
printf(" * Copyright (C) 2008 Tobias Preis (http://www.tobiaspreis.de)\n");
printf(" *\n");
printf(" * This program is free software; you can redistribute it and/or\n");
printf(" * modify it under the terms of the GNU General Public License\n");
printf(" * as published by the Free Software Foundation; either version\n");
printf(" * 3 of the License, or (at your option) any later version.\n");
printf(" *\n");
printf(" * This program is distributed in the hope that it will be useful,\n");
printf(" * but WITHOUT ANY WARRANTY; without even the implied warranty of\n");
printf(" * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n");
printf(" * GNU General Public License for more details.\n");
printf(" *\n");
printf(" * You should have received a copy of the GNU General Public\n");
printf(" * License along with this program; if not, see\n");
printf(" * http://www.gnu.org/licenses/\n");
printf(" *\n");
printf(" * Related publication:\n");
printf(" *\n");
printf(" * T. Preis, P. Virnau, W. Paul, and J. J. Schneider,\n");
printf(" * Journal of Computational Physics 228, 4468-4477 (2009)\n");
printf(" * doi:10.1016/j.jcp.2009.03.018\n");
printf(" *\n");
printf(" ----------------------------- Ising model ----------------------------- \n");
printf(" Number of Spins: %d \n",N);
printf(" Start Temperature: %f \n",T_START);
printf(" Decreasing Factor: %f \n",T_FACTOR);
printf(" Final Temperature: %f \n",T_END);
printf(" Global Iterations: %d \n",GLOBAL_ITERATIONS);
//Init
//CUT_DEVICE_INIT(argc,argv);
srand48(23);
//Allocate and init host memory for output arrays
int num_entries=0;
for(double t=T_START; t>=T_END; t=t*T_FACTOR) num_entries++;
unsigned int mem_out_size=sizeof(float)*num_entries;
float* h_T=(float*) malloc(mem_out_size);
float* h_E=(float*) malloc(mem_out_size);
unsigned int mem_ref_out_size=sizeof(double)*num_entries;
double* h_ref_E=(double*) malloc(mem_ref_out_size);
num_entries=0;
for(double t=T_START; t>=T_END; t=t*T_FACTOR) {
h_T[num_entries]=t;
num_entries++;
}
//Allocate and init host memory for simulation arrays
unsigned int mem_size=sizeof(int)*N;
unsigned int mem_size_random=sizeof(int)*BLOCK_SIZE*BLOCK_SIZE;
int* h_random_data=(int*) malloc(mem_size_random);
int* h_S=(int*) malloc(mem_size);
unsigned int mem_size_out=sizeof(int)*BLOCK_SIZE;
int* h_out=(int*) malloc(mem_size_out);
h_random_data[0]=1;
for(int i=1;i<BLOCK_SIZE*BLOCK_SIZE;i++) {
h_random_data[i]=16807*h_random_data[i-1];
}
for(int i=0;i<N;i++) {
if(drand48()>0.5) h_S[i]=-1;
else h_S[i]=1;
}
//Create and start timer
float gpu_sum=0;
/*//toby
unsigned int timer=0;
CUDA_CHECK_ERROR(hipDeviceSynchronize());
CUDA_CHECK_ERROR(cutCreateTimer(&timer));
CUDA_CHECK_ERROR(cutStartTimer(timer));*/
//mine
StopWatch stopwatch;
stopwatch.start();
//Allocate device memory for arrays
int* d_random_data;
int* d_S;
int* d_out;
CUDA_CHECK_ERROR(hipMalloc((void**) &d_random_data,mem_size_random));
CUDA_CHECK_ERROR(hipMalloc((void**) &d_S,mem_size));
CUDA_CHECK_ERROR(hipMalloc((void**) &d_out,mem_size_out));
//Stop and destroy timer
printf("\n --------------------------------- GPU --------------------------------- \n");
/* //toby
CUDA_CHECK_ERROR(hipDeviceSynchronize());
CUDA_CHECK_ERROR(cutStopTimer(timer));
float gpu_dt_malloc=cutGetTimerValue(timer);
printf(" Processing time on GPU for allocating: %f (ms) \n",gpu_dt_malloc);
CUDA_CHECK_ERROR(cutDeleteTimer(timer));*/
//mine
float gpu_dt_malloc = stopwatch.stop();
printf(" Processing time on GPU for allocating (StopWatch): %f (ms) \n",gpu_dt_malloc);
gpu_sum+=gpu_dt_malloc;
//Create and start timer
/* //toby
timer=0;
CUDA_CHECK_ERROR(hipDeviceSynchronize());
CUDA_CHECK_ERROR(cutCreateTimer(&timer));
CUDA_CHECK_ERROR(cutStartTimer(timer));*/
//mine
stopwatch.start();
//Copy host memory to device and create mirror of d_S
CUDA_CHECK_ERROR(hipMemcpy(d_random_data,h_random_data,mem_size_random,hipMemcpyHostToDevice));
CUDA_CHECK_ERROR(hipMemcpy(d_S,h_S,mem_size,hipMemcpyHostToDevice));
//Stop and destroy timer
/* //toby
CUDA_CHECK_ERROR(hipDeviceSynchronize());
CUDA_CHECK_ERROR(cutStopTimer(timer));
float gpu_dt_mem=cutGetTimerValue(timer);
printf(" Processing time on GPU for memory transfer: %f (ms) \n",gpu_dt_mem);
CUDA_CHECK_ERROR(cutDeleteTimer(timer));*/
//mine
float gpu_dt_mem = stopwatch.stop();
printf(" Processing time on GPU for memory transfer (StopWatch): %f (ms) \n",gpu_dt_mem);
gpu_sum+=gpu_dt_mem;
//Print spins
if(FLAG_PRINT_SPINS) {
CUDA_CHECK_ERROR(hipMemcpy(h_S,d_S,mem_size,hipMemcpyDeviceToHost));
for(int i=0;i<BLOCK_SIZE*2;i++) {
for(int j=0;j<BLOCK_SIZE*2;j++) {
if(h_S[i*BLOCK_SIZE*2+j]>0) printf("+ ");
else printf("- ");
}
printf("\n");
}
printf("\n");
}
//Create and start timer
/* //toby
timer=0;
CUDA_CHECK_ERROR(hipDeviceSynchronize());
CUDA_CHECK_ERROR(cutCreateTimer(&timer));
CUDA_CHECK_ERROR(cutStartTimer(timer));*/
//mine
stopwatch.start();
//improved kernel
KernelInputs ki;
//malloc on gpu
CUDA_CHECK_ERROR(hipMalloc((void**) &ki.R,mem_size_random));
CUDA_CHECK_ERROR(hipMalloc((void**) &ki.S,mem_size));
//mem copy
CUDA_CHECK_ERROR(hipMemcpy(ki.R,h_random_data,mem_size_random,hipMemcpyHostToDevice));
CUDA_CHECK_ERROR(hipMemcpy(ki.S,h_S,mem_size,hipMemcpyHostToDevice));
//ising 2d checkerboard kernels
IsingKernelInputs2d ki2d;
ki2d.Lx = n;
ki2d.Ly = n;
//shared memory kernel
// int xblocks = ceil(ki2d.Lx/2/BLOCKDIMX);
// int yblocks = ceil(ki2d.Ly/BLOCKDIMY);
// int xthreads = BLOCKDIMX;
// int ythreads = BLOCKDIMY;
//vertical line kernel
int xblocks = ceil(ki2d.Lx/2/THREADS_PER_BLOCK);
int yblocks = ceil(ki2d.Ly/LINE_LENGTH);
int xthreads = THREADS_PER_BLOCK;
int ythreads = 1;
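// Note: ki2d.Lx/2/THREADS_PER_BLOCK and ki2d.Ly/LINE_LENGTH are evaluated with
// integer division, so the ceil() calls cannot round up; the grid only covers
// the whole lattice if THREADS_PER_BLOCK and LINE_LENGTH (defined in rob.cu)
// evenly divide Lx/2 and Ly.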
dim3 isingGrid(xblocks,yblocks);
printf("Spin dim: %d, %d\n",ki2d.Lx,ki2d.Ly);
printf("Grid dim: %d, %d\n",xblocks,yblocks);
dim3 isingBlock(xthreads,ythreads);
printf("Block dim: %d, %d\n",xthreads,ythreads);
int S0[N/2];
int S1[N/2];
CUDA_CHECK_ERROR(hipMalloc((void**) &ki2d.S0,mem_size/2));
CUDA_CHECK_ERROR(hipMalloc((void**) &ki2d.S1,mem_size/2));
CUDA_CHECK_ERROR(hipMalloc((void**) &ki2d.R,mem_size_random));
//split spins
int rows = ki2d.Ly;
int cols = ki2d.Lx;
int checkerboardRows = rows;
int checkerboardCols = cols/2;
for(int row=0;row<checkerboardRows;++row)
{
for(int col=0;col<checkerboardCols;++col)
{
bool evenrow = row & 1; //note: despite the name, this evaluates to true for odd rows
int a0 = 0;
int a1 = 1;
if(evenrow)
{
a0 = 1;
a1 = 0;
}
int checkerboardIndex = row*checkerboardCols + col;
S0[checkerboardIndex] = h_S[checkerboardIndex*2 + a0];
S1[checkerboardIndex] = h_S[checkerboardIndex*2 + a1];
}
}
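// In the checkerboard split above, the S0 entry at (row,col) comes from full
// lattice index row*Lx + 2*col + (row & 1) and the S1 entry from
// row*Lx + 2*col + 1 - (row & 1), i.e. S0 holds the sites with x+y even and
// S1 the sites with x+y odd.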
CUDA_CHECK_ERROR(hipMemcpy(ki2d.S0,S0,mem_size/2,hipMemcpyHostToDevice));
CUDA_CHECK_ERROR(hipMemcpy(ki2d.S1,S1,mem_size/2,hipMemcpyHostToDevice));
CUDA_CHECK_ERROR(hipMemcpy(ki2d.R,h_random_data,mem_size_random,hipMemcpyHostToDevice));
if(argc > 1)
{
hipDeviceSynchronize();
printf("Warning! Debug functions are on! The speedup will be MUCH lower.\n");
printf("Performing random number check\n");
if(not validateKernelInt(ki2d.R,d_random_data,BLOCK_SIZE*BLOCK_SIZE))//new ising
//if(not validateKernelInt(ki.R,d_random_data,BLOCK_SIZE*BLOCK_SIZE))//improved kernel
{
printf("Error!!! Validation function not working!!! (random values) \n");
}
printf("Performing spin check\n");
if(not validateCheckerboardInt(ki2d.S0,ki2d.S1,d_S,ki2d.Ly,ki2d.Lx))//new ising
//if(not validateKernelInt(ki.S,d_S,N))//improved kernel
{
printf("Error!!! Validation function not working!!! (spins) \n");
}
printf("Finished initial debug checks.\n");
}
//Calc energy
num_entries=0;
dim3 threads(BLOCK_SIZE);
dim3 grid(BLOCK_SIZE);
for(float t=T_START;t>=T_END;t=t*T_FACTOR)
{
//rob
bool validationFlag = true;
ki.exp4 = exp(-(4.0)/t);
ki.exp8 = exp(-(8.0)/t);
ki2d.exp4 = exp(-(4.0)/t);
ki2d.exp8 = exp(-(8.0)/t);
//
//double avg_H=0;
for(int global_iteration=0;global_iteration<GLOBAL_ITERATIONS;global_iteration++)
{
//toby
if(argc > 1)
{
//device_function_main<<<grid,threads>>>(d_S,d_out,d_random_data,t,true);
//device_function_main<<<grid,threads>>>(d_S,d_out,d_random_data,t,false);
hipLaunchKernelGGL(( device_function_main_noreduce), dim3(grid),dim3(threads), 0, 0, d_S,d_random_data,t,true);
hipLaunchKernelGGL(( device_function_main_noreduce), dim3(grid),dim3(threads), 0, 0, d_S,d_random_data,t,false);
}
//device_rob<<<grid,threads>>>(ki,true);
//device_rob<<<grid,threads>>>(ki,false);
hipLaunchKernelGGL(( ising2d), dim3(isingGrid),dim3(isingBlock), 0, 0, ki2d,ki2d.S0,ki2d.S1,true);
hipLaunchKernelGGL(( ising2d), dim3(isingGrid),dim3(isingBlock), 0, 0, ki2d,ki2d.S1,ki2d.S0,false);
//device_function_main_noreduce<<<grid,threads>>>(d_S,d_random_data,t,true);
//device_function_main_noreduce<<<grid,threads>>>(d_S,d_random_data,t,false);
//validate
if(argc > 1)
{
hipDeviceSynchronize();
//if(not validateKernelInt(ki.R,d_random_data,BLOCK_SIZE*BLOCK_SIZE))//improved kernel
if(not validateKernelInt(ki2d.R,d_random_data,BLOCK_SIZE*BLOCK_SIZE))//new ising kernel
{
printf("Error!!! Random data results are not same!!! (iteration %d)\n",global_iteration);
validationFlag = false;
break;
}
//if(not validateKernelInt(ki.S,d_S,N))//improved kernel
if(not validateCheckerboardInt(ki2d.S0,ki2d.S1,d_S,ki2d.Ly,ki2d.Lx))//new ising kernel
{
printf("Error!!! Spin results are not same!!! (iteration %d)\n",global_iteration);
validationFlag = false;
break;
}
}
hipDeviceSynchronize();
//CUDA_CHECK_ERROR(hipMemcpy(h_out,d_out,mem_size_out,hipMemcpyDeviceToHost));
//int energy_sum=0;
//for(int i=0;i<BLOCK_SIZE;i++) energy_sum+=h_out[i];
//avg_H+=(float)energy_sum/N;
}
//h_E[num_entries]=avg_H/GLOBAL_ITERATIONS;
//num_entries++;
if(argc > 1 and validationFlag)
{
printf("Passed validation checks for t = %f\n",t);
}
}
//Stop and destroy timer
/* //toby
CUDA_CHECK_ERROR(hipDeviceSynchronize());
CUDA_CHECK_ERROR(cutStopTimer(timer));
float gpu_dt_main=cutGetTimerValue(timer);
printf(" Processing time on GPU for main function: %f (ms) \n",gpu_dt_main);
CUDA_CHECK_ERROR(cutDeleteTimer(timer));*/
//mine
float gpu_dt_main = stopwatch.stop();
printf(" Processing time on GPU for main function (StopWatch): %f (ms) \n",gpu_dt_main);
gpu_sum+=gpu_dt_main;
printf(" Total processing time on GPU: %f (ms) \n",gpu_sum);
//Check kernel execution
//CUDA_CHECK_ERROR("Kernel execution failed");
//Print spins
if(FLAG_PRINT_SPINS)
{
CUDA_CHECK_ERROR(hipMemcpy(h_S,d_S,mem_size,hipMemcpyDeviceToHost));
for(int i=0;i<BLOCK_SIZE*2;i++)
{
for(int j=0;j<BLOCK_SIZE*2;j++)
{
if(h_S[i*BLOCK_SIZE*2+j]>0) printf("+ ");
else printf("- ");
}
printf("\n");
}
}
//Create and start timer
/* //toby
timer=0;
CUDA_CHECK_ERROR(hipDeviceSynchronize());
CUDA_CHECK_ERROR(cutCreateTimer(&timer));
CUDA_CHECK_ERROR(cutStartTimer(timer));*/
//mine
stopwatch.start();
//Reference solution
cpu_function_noreduce(h_ref_E,h_S);
//Print spins
if(FLAG_PRINT_SPINS)
{
printf("\n");
for(int i=0;i<BLOCK_SIZE*2;i++)
{
for(int j=0;j<BLOCK_SIZE*2;j++)
{
if(h_S[i*BLOCK_SIZE*2+j]>0) printf("+ ");
else printf("- ");
}
printf("\n");
}
}
//Stop and destroy timer
printf("\n --------------------------------- CPU --------------------------------- \n");
/* //toby
CUDA_CHECK_ERROR(hipDeviceSynchronize());
CUDA_CHECK_ERROR(cutStopTimer(timer));
float cpu_sum=cutGetTimerValue(timer);
printf(" Total processing time on CPU: %f (ms) \n",cpu_sum);
CUDA_CHECK_ERROR(cutDeleteTimer(timer));*/
//rob
float cpu_sum = stopwatch.stop();
printf(" Total processing time on CPU (StopWatch): %f (ms) \n",cpu_sum);
printf("\n Speedup: %fX \n\n",(cpu_sum/gpu_sum));
//Cleaning memory
free(h_T);
free(h_E);
free(h_ref_E);
free(h_random_data);
free(h_S);
free(h_out);
CUDA_CHECK_ERROR(hipFree(d_random_data));
CUDA_CHECK_ERROR(hipFree(d_S));
CUDA_CHECK_ERROR(hipFree(d_out));
CUDA_CHECK_ERROR(hipFree(ki.R));
CUDA_CHECK_ERROR(hipFree(ki.S));
CUDA_CHECK_ERROR(hipFree(ki2d.S0));
CUDA_CHECK_ERROR(hipFree(ki2d.S1));
CUDA_CHECK_ERROR(hipFree(ki2d.R));
}
/****
*
* Device function main
*
*/
__global__ void device_function_main(int* S,int* out,int* R,float t,bool flag)
{
//Energy variable
int dH=0;
float exp_dH_4=exp(-(4.0)/t);
float exp_dH_8=exp(-(8.0)/t);
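//Metropolis acceptance: a flip with local energy change dH<=0 is always
//taken; dH==4 or dH==8 is taken with probability exp(-dH/t). The factor
//4.656612e-10 (~2^-31) maps the 32-bit LCG state to a value in [0,1].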
//Allocate shared memory
__shared__ int r[BLOCK_SIZE];
//Load random data
r[threadIdx.x]=R[threadIdx.x+BLOCK_SIZE*blockIdx.x];
__syncthreads();
//Stencil computation on spins
//No shared memory utilization for S?!?!?
if(flag)
{
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update top left
if(blockIdx.x==0)
{ //Top
if(threadIdx.x==0)
{ //Left
dH=2*S[2*threadIdx.x]*(
S[2*threadIdx.x+1]+
S[2*threadIdx.x-1+2*BLOCK_SIZE]+
S[2*threadIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+N-2*BLOCK_SIZE]);
}
else
{
dH=2*S[2*threadIdx.x]*(
S[2*threadIdx.x+1]+
S[2*threadIdx.x-1]+
S[2*threadIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+N-2*BLOCK_SIZE]);
}
}
else
{
if(threadIdx.x==0)
{ //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-1+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
else
{
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
}
if(dH==4)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4)
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
}
else if(dH==8)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8)
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
}
else
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update bottom right
if(blockIdx.x==BLOCK_SIZE-1)
{ //Bottom
if(threadIdx.x==BLOCK_SIZE-1)
{ //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
else
{
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
}
else
{
if(threadIdx.x==BLOCK_SIZE-1)
{ //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
else
{
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
}
if(dH==4)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4)
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else if(dH==8)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8)
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
__syncthreads();
}
else
{
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update top right
if(blockIdx.x==0)
{ //Top
if(threadIdx.x==BLOCK_SIZE-1)
{ //Right
dH=2*S[2*threadIdx.x+1]*(
S[2*threadIdx.x+2-2*BLOCK_SIZE]+
S[2*threadIdx.x]+
S[2*threadIdx.x+1+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+N-2*BLOCK_SIZE]);
}
else
{
dH=2*S[2*threadIdx.x+1]*(
S[2*threadIdx.x+2]+
S[2*threadIdx.x]+
S[2*threadIdx.x+1+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+N-2*BLOCK_SIZE]);
}
}
else
{
if(threadIdx.x==BLOCK_SIZE-1)
{ //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2-2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
else
{
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
}
if(dH==4)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4)
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
}
else if(dH==8)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8)
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
}
else
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update bottom left
if(blockIdx.x==BLOCK_SIZE-1)
{ //Bottom
if(threadIdx.x==0)
{ //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)-1]+
S[2*threadIdx.x]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
else
{
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE-1]+
S[2*threadIdx.x]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
}
else
{
if(threadIdx.x==0)
{ //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
else
{
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
}
if(dH==4)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4)
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else if(dH==8)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8)
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
//Transfer random data back to global memory
R[threadIdx.x+BLOCK_SIZE*blockIdx.x]=r[threadIdx.x];
if(!flag)
{
//For reduction shared memory array r is used
if(FLAG_ENERGY)
{
//Calc energy
if(blockIdx.x==BLOCK_SIZE-1)
{ //Bottom
if(threadIdx.x==BLOCK_SIZE-1)
{ //Right
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1-2*BLOCK_SIZE]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+S[2*threadIdx.x+1]);
}
else
{
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2+2*BLOCK_SIZE]+S[2*threadIdx.x+1]);
}
}
else
{
if(threadIdx.x==BLOCK_SIZE-1)
{ //Right
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1-2*BLOCK_SIZE]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]);
}
else
{
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2+2*BLOCK_SIZE]+S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]);
}
}
__syncthreads();
}
else
{
//Calc magnetisation
dH=S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]
+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]
+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]
+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
__syncthreads();
}
//Save partial results back to shared memory in new structure
r[threadIdx.x]=dH;
//Reduction on GPU
for(unsigned int dx=1;dx<BLOCK_SIZE;dx*=2)
{
if(threadIdx.x%(2*dx)==0)
{
r[threadIdx.x]+=r[threadIdx.x+dx];
}
__syncthreads();
}
//Save in out
if(threadIdx.x==0) out[blockIdx.x]=r[0];
}
}
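/****
 *
 * Illustrative sketch (assumption, not called anywhere): the tree reduction
 * above uses modulo addressing (threadIdx.x%(2*dx)==0), which leaves most
 * lanes of every warp idle and divergent. A sequential-addressing variant
 * over the same shared array, valid because BLOCK_SIZE (512) is a power of
 * two, would look like this.
 *
 */
__device__ void block_reduce_sequential(int* r)
{
	for(unsigned int dx=BLOCK_SIZE/2;dx>0;dx>>=1)
	{
		if(threadIdx.x<dx)
		{
			r[threadIdx.x]+=r[threadIdx.x+dx];
		}
		__syncthreads();
	}
	//r[0] now holds the block-wide sum
}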
__global__ void device_function_main_noreduce(int* S,int* R,float t,bool flag)
{
//Energy variable
int dH=0;
float exp_dH_4=exp(-(4.0)/t);
float exp_dH_8=exp(-(8.0)/t);
//Allocate shared memory
__shared__ int r[BLOCK_SIZE];
//Load random data
r[threadIdx.x]=R[threadIdx.x+BLOCK_SIZE*blockIdx.x];
__syncthreads();
//Stencil computation on spins
//No shared memory utilization for S?!?!?
if(flag)
{
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update top left
if(blockIdx.x==0)
{ //Top
if(threadIdx.x==0)
{ //Left
dH=2*S[2*threadIdx.x]*(
S[2*threadIdx.x+1]+
S[2*threadIdx.x-1+2*BLOCK_SIZE]+
S[2*threadIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+N-2*BLOCK_SIZE]);
}
else
{
dH=2*S[2*threadIdx.x]*(
S[2*threadIdx.x+1]+
S[2*threadIdx.x-1]+
S[2*threadIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+N-2*BLOCK_SIZE]);
}
}
else
{
if(threadIdx.x==0)
{ //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-1+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
else
{
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
}
// int globalIndex = 2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x;
// int ii[4];
// ii[0]=globalIndex-2*BLOCK_SIZE+N;
// ii[1]=globalIndex+1;
// ii[2]=globalIndex+2*BLOCK_SIZE;
// ii[3]=globalIndex-1;
// int i=10;
// if(globalIndex == i)
// {
// printf("Old kernel spins at i=%d: (t,r,b,l): %d,%d,%d,%d,%d,%d,%d,%d\n",
// i,
// S[globalIndex+1],
// S[globalIndex-1],
// S[globalIndex+2*BLOCK_SIZE],
// S[globalIndex-2*BLOCK_SIZE+N],
// ii[0],ii[1],ii[2],ii[3]
// );
// }
if(dH==4)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4)
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
}
else if(dH==8)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8)
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
}
else
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update bottom right
if(blockIdx.x==BLOCK_SIZE-1)
{ //Bottom
if(threadIdx.x==BLOCK_SIZE-1)
{ //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
else
{
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
}
else
{
if(threadIdx.x==BLOCK_SIZE-1)
{ //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
else
{
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
}
if(dH==4)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4)
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else if(dH==8)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8)
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
__syncthreads();
}
else
{
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update top right
if(blockIdx.x==0)
{ //Top
if(threadIdx.x==BLOCK_SIZE-1)
{ //Right
dH=2*S[2*threadIdx.x+1]*(
S[2*threadIdx.x+2-2*BLOCK_SIZE]+
S[2*threadIdx.x]+
S[2*threadIdx.x+1+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+N-2*BLOCK_SIZE]);
}
else
{
dH=2*S[2*threadIdx.x+1]*(
S[2*threadIdx.x+2]+
S[2*threadIdx.x]+
S[2*threadIdx.x+1+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+N-2*BLOCK_SIZE]);
}
}
else
{
if(threadIdx.x==BLOCK_SIZE-1)
{ //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2-2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
else
{
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
}
if(dH==4)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4)
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
}
else if(dH==8)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8)
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
}
else
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update bottom left
if(blockIdx.x==BLOCK_SIZE-1)
{ //Bottom
if(threadIdx.x==0)
{ //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)-1]+
S[2*threadIdx.x]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
else
{
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE-1]+
S[2*threadIdx.x]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
}
else
{
if(threadIdx.x==0)
{ //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
else
{
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
}
if(dH==4)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4)
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else if(dH==8)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8)
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
//Transfer random data back to global memory
R[threadIdx.x+BLOCK_SIZE*blockIdx.x]=r[threadIdx.x];
}
/****
*
* CPU function
*
*/
void cpu_function(double* E, int* S)
{
int random=23;
int num_entries=0;
for(double t=T_START;t>=T_END;t=t*T_FACTOR)
{
double avg_H=0;
double exp_dH_4=exp(-(4.0)/t);
double exp_dH_8=exp(-(8.0)/t);
for(int global_iteration=0;global_iteration<GLOBAL_ITERATIONS;++global_iteration)
{
if(FLAG_ENERGY)
{
//Energy
double H=0;
for(int x=0;x<n;++x)
{
for(int y=0;y<n;++y)
{
int xr=x+1,yd=y+1;
if(xr==n) xr=0;
if(yd==n) yd=0;
H+=-S[y*n+x]*(S[y*n+xr]+S[yd*n+x]);
}
}
avg_H+=H/N;
} else {
//Magnetisation
double H=0;
for(int x=0;x<N;++x)
{
H+=S[x];
}
avg_H+=H/N;
}
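//Sweep the even sublattice first: since n is even, (y*(n+1)+x)%2 equals
//(x+y)%2, so this pass updates the sites with x+y even and the following
//pass the sites with x+y odd.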
for(int x=0;x<n;++x)
{
for(int y=0;y<n;++y)
{
if((y*(n+1)+x)%2==0)
{
int xl=x-1,yl=y,xu=x,yu=y-1,xr=x+1,yr=y,xd=x,yd=y+1;
if(x==0)
{
xl=n-1;
}
else if(x==n-1)
{
xr=0;
}
if(y==0)
{
yu=n-1;
}
else if(y==n-1)
{
yd=0;
}
//Initial local energy
int dH=2*S[y*n+x]*(
S[yl*n+xl]+
S[yr*n+xr]+
S[yu*n+xu]+
S[yd*n+xd]
);
if(dH==4)
{
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_4)
{
S[y*n+x]=-S[y*n+x];
}
}
else if(dH==8)
{
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_8)
{
S[y*n+x]=-S[y*n+x];
}
}
else
{
S[y*n+x]=-S[y*n+x];
}
}
}
}
for(int x=0;x<n;++x)
{
for(int y=0;y<n;++y)
{
if((y*(n+1)+x)%2==1)
{
int xl=x-1,yl=y,xu=x,yu=y-1,xr=x+1,yr=y,xd=x,yd=y+1;
if(x==0)
{
xl=n-1;
}
else if(x==n-1)
{
xr=0;
}
if(y==0)
{
yu=n-1;
}
else if(y==n-1)
{
yd=0;
}
//Initial local energy
int dH=2*S[y*n+x]*(
S[yl*n+xl]+
S[yr*n+xr]+
S[yu*n+xu]+
S[yd*n+xd]
);
if(dH==4)
{
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_4) {
S[y*n+x]=-S[y*n+x];
}
}
else if(dH==8)
{
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_8)
{
S[y*n+x]=-S[y*n+x];
}
}
else
{
S[y*n+x]=-S[y*n+x];
}
}
}
}
}
E[num_entries]=avg_H/GLOBAL_ITERATIONS;
num_entries++;
}
}
void cpu_function_noreduce(double* E, int* S)
{
int random=23;
//int num_entries=0;
for(double t=T_START;t>=T_END;t=t*T_FACTOR)
{
//double avg_H=0;
double exp_dH_4=exp(-(4.0)/t);
double exp_dH_8=exp(-(8.0)/t);
for(int global_iteration=0;global_iteration<GLOBAL_ITERATIONS;++global_iteration)
{
/*if(FLAG_ENERGY)
{
//Energy
double H=0;
for(int x=0;x<n;++x)
{
for(int y=0;y<n;++y)
{
int xr=x+1,yd=y+1;
if(xr==n) xr=0;
if(yd==n) yd=0;
H+=-S[y*n+x]*(S[y*n+xr]+S[yd*n+x]);
}
}
avg_H+=H/N;
} else {
//Magnetisation
double H=0;
for(int x=0;x<N;++x)
{
H+=S[x];
}
avg_H+=H/N;
}*/
for(int x=0;x<n;++x)
{
for(int y=0;y<n;++y)
{
if((y*(n+1)+x)%2==0)
{
int xl=x-1,yl=y,xu=x,yu=y-1,xr=x+1,yr=y,xd=x,yd=y+1;
if(x==0)
{
xl=n-1;
}
else if(x==n-1)
{
xr=0;
}
if(y==0)
{
yu=n-1;
}
else if(y==n-1)
{
yd=0;
}
//Initial local energy
int dH=2*S[y*n+x]*(
S[yl*n+xl]+
S[yr*n+xr]+
S[yu*n+xu]+
S[yd*n+xd]
);
if(dH==4)
{
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_4)
{
S[y*n+x]=-S[y*n+x];
}
}
else if(dH==8)
{
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_8)
{
S[y*n+x]=-S[y*n+x];
}
}
else
{
S[y*n+x]=-S[y*n+x];
}
}
}
}
for(int x=0;x<n;++x)
{
for(int y=0;y<n;++y)
{
if((y*(n+1)+x)%2==1)
{
int xl=x-1,yl=y,xu=x,yu=y-1,xr=x+1,yr=y,xd=x,yd=y+1;
if(x==0)
{
xl=n-1;
}
else if(x==n-1)
{
xr=0;
}
if(y==0)
{
yu=n-1;
}
else if(y==n-1)
{
yd=0;
}
//Initial local energy
int dH=2*S[y*n+x]*(
S[yl*n+xl]+
S[yr*n+xr]+
S[yu*n+xu]+
S[yd*n+xd]
);
if(dH==4)
{
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_4) {
S[y*n+x]=-S[y*n+x];
}
}
else if(dH==8)
{
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_8)
{
S[y*n+x]=-S[y*n+x];
}
}
else
{
S[y*n+x]=-S[y*n+x];
}
}
}
}
}
//E[num_entries]=avg_H/GLOBAL_ITERATIONS;
//num_entries++;
}
}
|
3370691bebb3f49216a8d2dd02deb852489c6566.cu
|
/****
*
* GPU accelerated Monte Carlo simulation of the 2D Ising model
*
* Copyright (C) 2008 Tobias Preis (http://www.tobiaspreis.de)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, see
* http://www.gnu.org/licenses/.
*
* Related publication:
*
* T. Preis, P. Virnau, W. Paul, and J. J. Schneider,
* Journal of Computational Physics 228, 4468-4477 (2009)
* doi:10.1016/j.jcp.2009.03.018
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include "cuda_utils.h"
//#include <helper_cuda.h>
#define FLAG_PRINT_SPINS 0
#define FLAG_ENERGY 0
#define T_START 3.00
#define T_FACTOR 0.9
#define T_END 2.00
#define GLOBAL_ITERATIONS 100
#define RANDOM_A 1664525
#define RANDOM_B 1013904223
#define BLOCK_SIZE 512
const unsigned int N=4*BLOCK_SIZE*BLOCK_SIZE;
const unsigned int n=2*BLOCK_SIZE;
#include "rob.cu"
/****
*
* Function declaration
*
*/
void calc(int argc,char** argv);
void cpu_function(double*,int*);
void cpu_function_noreduce(double*,int*);
__global__ void device_function_main(int*,int*,int*,float,bool);
__global__ void device_function_main_noreduce(int*,int*,float,bool);
/****
*
* Main function
*
*/
int main(int argc,char** argv) {
calc(argc,argv);
}
/****
*
* Calc
*
*/
void calc(int argc,char** argv) {
printf("----------------------------------------------------------------------- \n");
printf(" *\n");
printf(" * GPU accelerated Monte Carlo simulation of the 2D Ising model\n");
printf(" *\n");
printf(" * Copyright (C) 2008 Tobias Preis (http://www.tobiaspreis.de)\n");
printf(" *\n");
printf(" * This program is free software; you can redistribute it and/or\n");
printf(" * modify it under the terms of the GNU General Public License\n");
printf(" * as published by the Free Software Foundation; either version\n");
printf(" * 3 of the License, or (at your option) any later version.\n");
printf(" *\n");
printf(" * This program is distributed in the hope that it will be useful,\n");
printf(" * but WITHOUT ANY WARRANTY; without even the implied warranty of\n");
printf(" * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n");
printf(" * GNU General Public License for more details.\n");
printf(" *\n");
printf(" * You should have received a copy of the GNU General Public\n");
printf(" * License along with this program; if not, see\n");
printf(" * http://www.gnu.org/licenses/\n");
printf(" *\n");
printf(" * Related publication:\n");
printf(" *\n");
printf(" * T. Preis, P. Virnau, W. Paul, and J. J. Schneider,\n");
printf(" * Journal of Computational Physics 228, 4468-4477 (2009)\n");
printf(" * doi:10.1016/j.jcp.2009.03.018\n");
printf(" *\n");
printf(" ----------------------------- Ising model ----------------------------- \n");
printf(" Number of Spins: %d \n",N);
printf(" Start Temperature: %f \n",T_START);
printf(" Decreasing Factor: %f \n",T_FACTOR);
printf(" Final Temperature: %f \n",T_END);
printf(" Global Iterations: %d \n",GLOBAL_ITERATIONS);
//Init
//CUT_DEVICE_INIT(argc,argv);
srand48(23);
//Allocate and init host memory for output arrays
int num_entries=0;
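//Count how many temperature steps the geometric cooling schedule t -> t*T_FACTOR produces between T_START and T_END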
for(double t=T_START; t>=T_END; t=t*T_FACTOR) num_entries++;
unsigned int mem_out_size=sizeof(float)*num_entries;
float* h_T=(float*) malloc(mem_out_size);
float* h_E=(float*) malloc(mem_out_size);
unsigned int mem_ref_out_size=sizeof(double)*num_entries;
double* h_ref_E=(double*) malloc(mem_ref_out_size);
num_entries=0;
for(double t=T_START; t>=T_END; t=t*T_FACTOR) {
h_T[num_entries]=t;
num_entries++;
}
//Allocate and init host memory for simulation arrays
unsigned int mem_size=sizeof(int)*N;
unsigned int mem_size_random=sizeof(int)*BLOCK_SIZE*BLOCK_SIZE;
int* h_random_data=(int*) malloc(mem_size_random);
int* h_S=(int*) malloc(mem_size);
unsigned int mem_size_out=sizeof(int)*BLOCK_SIZE;
int* h_out=(int*) malloc(mem_size_out);
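//Seed one LCG state per GPU thread (BLOCK_SIZE*BLOCK_SIZE states); successive seeds are generated by repeated
//multiplication with 16807 (the Park-Miller multiplier), wrapping in 32-bit arithmetic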
h_random_data[0]=1;
for(int i=1;i<BLOCK_SIZE*BLOCK_SIZE;i++) {
h_random_data[i]=16807*h_random_data[i-1];
}
for(int i=0;i<N;i++) {
if(drand48()>0.5) h_S[i]=-1;
else h_S[i]=1;
}
//Create and start timer
float gpu_sum=0;
/*//toby
unsigned int timer=0;
CUDA_CHECK_ERROR(cudaThreadSynchronize());
CUDA_CHECK_ERROR(cutCreateTimer(&timer));
CUDA_CHECK_ERROR(cutStartTimer(timer));*/
//mine
StopWatch stopwatch;
stopwatch.start();
//Allocate device memory for arrays
int* d_random_data;
int* d_S;
int* d_out;
CUDA_CHECK_ERROR(cudaMalloc((void**) &d_random_data,mem_size_random));
CUDA_CHECK_ERROR(cudaMalloc((void**) &d_S,mem_size));
CUDA_CHECK_ERROR(cudaMalloc((void**) &d_out,mem_size_out));
//Stop and destroy timer
printf("\n --------------------------------- GPU --------------------------------- \n");
/* //toby
CUDA_CHECK_ERROR(cudaThreadSynchronize());
CUDA_CHECK_ERROR(cutStopTimer(timer));
float gpu_dt_malloc=cutGetTimerValue(timer);
printf(" Processing time on GPU for allocating: %f (ms) \n",gpu_dt_malloc);
CUDA_CHECK_ERROR(cutDeleteTimer(timer));*/
//mine
float gpu_dt_malloc = stopwatch.stop();
printf(" Processing time on GPU for allocating (StopWatch): %f (ms) \n",gpu_dt_malloc);
gpu_sum+=gpu_dt_malloc;
//Create and start timer
/* //toby
timer=0;
CUDA_CHECK_ERROR(cudaThreadSynchronize());
CUDA_CHECK_ERROR(cutCreateTimer(&timer));
CUDA_CHECK_ERROR(cutStartTimer(timer));*/
//mine
stopwatch.start();
//Copy host memory to device and create mirror of d_S
CUDA_CHECK_ERROR(cudaMemcpy(d_random_data,h_random_data,mem_size_random,cudaMemcpyHostToDevice));
CUDA_CHECK_ERROR(cudaMemcpy(d_S,h_S,mem_size,cudaMemcpyHostToDevice));
//Stop and destroy timer
/* //toby
CUDA_CHECK_ERROR(cudaThreadSynchronize());
CUDA_CHECK_ERROR(cutStopTimer(timer));
float gpu_dt_mem=cutGetTimerValue(timer);
printf(" Processing time on GPU for memory transfer: %f (ms) \n",gpu_dt_mem);
CUDA_CHECK_ERROR(cutDeleteTimer(timer));*/
//mine
float gpu_dt_mem = stopwatch.stop();
printf(" Processing time on GPU for memory transfer (StopWatch): %f (ms) \n",gpu_dt_mem);
gpu_sum+=gpu_dt_mem;
//Print spins
if(FLAG_PRINT_SPINS) {
CUDA_CHECK_ERROR(cudaMemcpy(h_S,d_S,mem_size,cudaMemcpyDeviceToHost));
for(int i=0;i<BLOCK_SIZE*2;i++) {
for(int j=0;j<BLOCK_SIZE*2;j++) {
if(h_S[i*BLOCK_SIZE*2+j]>0) printf("+ ");
else printf("- ");
}
printf("\n");
}
printf("\n");
}
//Create and start timer
/* //toby
timer=0;
CUDA_CHECK_ERROR(cudaThreadSynchronize());
CUDA_CHECK_ERROR(cutCreateTimer(&timer));
CUDA_CHECK_ERROR(cutStartTimer(timer));*/
//mine
stopwatch.start();
//improved kernel
KernelInputs ki;
//malloc on gpu
CUDA_CHECK_ERROR(cudaMalloc((void**) &ki.R,mem_size_random));
CUDA_CHECK_ERROR(cudaMalloc((void**) &ki.S,mem_size));
//mem copy
CUDA_CHECK_ERROR(cudaMemcpy(ki.R,h_random_data,mem_size_random,cudaMemcpyHostToDevice));
CUDA_CHECK_ERROR(cudaMemcpy(ki.S,h_S,mem_size,cudaMemcpyHostToDevice));
//ising 2d checkerboard kernels
IsingKernelInputs2d ki2d;
ki2d.Lx = n;
ki2d.Ly = n;
//shared memory kernel
// int xblocks = ceil(ki2d.Lx/2/BLOCKDIMX);
// int yblocks = ceil(ki2d.Ly/BLOCKDIMY);
// int xthreads = BLOCKDIMX;
// int ythreads = BLOCKDIMY;
//vertical line kernel
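//THREADS_PER_BLOCK and LINE_LENGTH come from rob.cu; the grid tiles half the lattice width (one checkerboard
//colour) horizontally and Ly/LINE_LENGTH strips vertically, so each thread presumably walks one vertical line
//of LINE_LENGTH spins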
int xblocks = ceil(ki2d.Lx/2/THREADS_PER_BLOCK);
int yblocks = ceil(ki2d.Ly/LINE_LENGTH);
int xthreads = THREADS_PER_BLOCK;
int ythreads = 1;
dim3 isingGrid(xblocks,yblocks);
printf("Spin dim: %d, %d\n",ki2d.Lx,ki2d.Ly);
printf("Grid dim: %d, %d\n",xblocks,yblocks);
dim3 isingBlock(xthreads,ythreads);
printf("Block dim: %d, %d\n",xthreads,ythreads);
int S0[N/2];
int S1[N/2];
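//Note: S0 and S1 are roughly 2 MiB each on the stack with the default BLOCK_SIZE; heap allocation would be safer
//if the lattice size grows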
CUDA_CHECK_ERROR(cudaMalloc((void**) &ki2d.S0,mem_size/2));
CUDA_CHECK_ERROR(cudaMalloc((void**) &ki2d.S1,mem_size/2));
CUDA_CHECK_ERROR(cudaMalloc((void**) &ki2d.R,mem_size_random));
//split spins
int rows = ki2d.Ly;
int cols = ki2d.Lx;
int checkerboardRows = rows;
int checkerboardCols = cols/2;
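//Split the full lattice into its two checkerboard sublattices: every row contributes alternating spins to S0 and S1,
//and the even/odd offset (a0/a1) flips from row to row so the colours stay consistent across rows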
for(int row=0;row<checkerboardRows;++row)
{
for(int col=0;col<checkerboardCols;++col)
{
bool evenrow = row & 1;
int a0 = 0;
int a1 = 1;
if(evenrow)
{
a0 = 1;
a1 = 0;
}
int checkerboardIndex = row*checkerboardCols + col;
S0[checkerboardIndex] = h_S[checkerboardIndex*2 + a0];
S1[checkerboardIndex] = h_S[checkerboardIndex*2 + a1];
}
}
CUDA_CHECK_ERROR(cudaMemcpy(ki2d.S0,S0,mem_size/2,cudaMemcpyHostToDevice));
CUDA_CHECK_ERROR(cudaMemcpy(ki2d.S1,S1,mem_size/2,cudaMemcpyHostToDevice));
CUDA_CHECK_ERROR(cudaMemcpy(ki2d.R,h_random_data,mem_size_random,cudaMemcpyHostToDevice));
if(argc > 1)
{
cudaDeviceSynchronize();
printf("Warning! Debug functions are on! The speedup will be MUCH lower.\n");
printf("Performing random number check\n");
if(not validateKernelInt(ki2d.R,d_random_data,BLOCK_SIZE*BLOCK_SIZE))//new ising
//if(not validateKernelInt(ki.R,d_random_data,BLOCK_SIZE*BLOCK_SIZE))//improved kernel
{
printf("Error!!! Validation function not working!!! (random values) \n");
}
printf("Performing spin check\n");
if(not validateCheckerboardInt(ki2d.S0,ki2d.S1,d_S,ki2d.Ly,ki2d.Lx))//new ising
//if(not validateKernelInt(ki.S,d_S,N))//improved kernel
{
printf("Error!!! Validation function not working!!! (spins) \n");
}
printf("Finished initial debug checks.\n");
}
//Calc energy
num_entries=0;
dim3 threads(BLOCK_SIZE);
dim3 grid(BLOCK_SIZE);
for(float t=T_START;t>=T_END;t=t*T_FACTOR)
{
//rob
bool validationFlag = true;
ki.exp4 = exp(-(4.0)/t);
ki.exp8 = exp(-(8.0)/t);
ki2d.exp4 = exp(-(4.0)/t);
ki2d.exp8 = exp(-(8.0)/t);
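//Precompute the Metropolis acceptance probabilities exp(-dH/t) for the only positive energy changes (dH=4 and dH=8);
//moves with dH<=0 are always accepted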
//
//double avg_H=0;
for(int global_iteration=0;global_iteration<GLOBAL_ITERATIONS;global_iteration++)
{
//toby
if(argc > 1)
{
//device_function_main<<<grid,threads>>>(d_S,d_out,d_random_data,t,true);
//device_function_main<<<grid,threads>>>(d_S,d_out,d_random_data,t,false);
device_function_main_noreduce<<<grid,threads>>>(d_S,d_random_data,t,true);
device_function_main_noreduce<<<grid,threads>>>(d_S,d_random_data,t,false);
}
//device_rob<<<grid,threads>>>(ki,true);
//device_rob<<<grid,threads>>>(ki,false);
ising2d<<<isingGrid,isingBlock>>>(ki2d,ki2d.S0,ki2d.S1,true);
ising2d<<<isingGrid,isingBlock>>>(ki2d,ki2d.S1,ki2d.S0,false);
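//One Monte Carlo sweep = two half-sweeps: update sublattice S0 with S1 held fixed as neighbours, then update S1
//against the freshly updated S0, mirroring the flag=true/false passes of the reference kernel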
//device_function_main_noreduce<<<grid,threads>>>(d_S,d_random_data,t,true);
//device_function_main_noreduce<<<grid,threads>>>(d_S,d_random_data,t,false);
//validate
if(argc > 1)
{
cudaDeviceSynchronize();
//if(not validateKernelInt(ki.R,d_random_data,BLOCK_SIZE*BLOCK_SIZE))//improved kernel
if(not validateKernelInt(ki2d.R,d_random_data,BLOCK_SIZE*BLOCK_SIZE))//new ising kernel
{
printf("Error!!! Random data results are not same!!! (iteration %d)\n",global_iteration);
validationFlag = false;
break;
}
//if(not validateKernelInt(ki.S,d_S,N))//improved kernel
if(not validateCheckerboardInt(ki2d.S0,ki2d.S1,d_S,ki2d.Ly,ki2d.Lx))//new ising kernel
{
printf("Error!!! Spin results are not same!!! (iteration %d)\n",global_iteration);
validationFlag = false;
break;
}
}
cudaDeviceSynchronize();
//CUDA_CHECK_ERROR(cudaMemcpy(h_out,d_out,mem_size_out,cudaMemcpyDeviceToHost));
//int energy_sum=0;
//for(int i=0;i<BLOCK_SIZE;i++) energy_sum+=h_out[i];
//avg_H+=(float)energy_sum/N;
}
//h_E[num_entries]=avg_H/GLOBAL_ITERATIONS;
//num_entries++;
if(argc > 1 and validationFlag)
{
printf("Passed validation checks for t = %f\n",t);
}
}
//Stop and destroy timer
/* //toby
CUDA_CHECK_ERROR(cudaThreadSynchronize());
CUDA_CHECK_ERROR(cutStopTimer(timer));
float gpu_dt_main=cutGetTimerValue(timer);
printf(" Processing time on GPU for main function: %f (ms) \n",gpu_dt_main);
CUDA_CHECK_ERROR(cutDeleteTimer(timer));*/
//mine
float gpu_dt_main = stopwatch.stop();
printf(" Processing time on GPU for main function (StopWatch): %f (ms) \n",gpu_dt_main);
gpu_sum+=gpu_dt_main;
printf(" Total processing time on GPU: %f (ms) \n",gpu_sum);
//Check kernel execution
//CUDA_CHECK_ERROR("Kernel execution failed");
//Print spins
if(FLAG_PRINT_SPINS)
{
CUDA_CHECK_ERROR(cudaMemcpy(h_S,d_S,mem_size,cudaMemcpyDeviceToHost));
for(int i=0;i<BLOCK_SIZE*2;i++)
{
for(int j=0;j<BLOCK_SIZE*2;j++)
{
if(h_S[i*BLOCK_SIZE*2+j]>0) printf("+ ");
else printf("- ");
}
printf("\n");
}
}
//Create and start timer
/* //toby
timer=0;
CUDA_CHECK_ERROR(cudaThreadSynchronize());
CUDA_CHECK_ERROR(cutCreateTimer(&timer));
CUDA_CHECK_ERROR(cutStartTimer(timer));*/
//mine
stopwatch.start();
//Reference solution
cpu_function_noreduce(h_ref_E,h_S);
//Print spins
if(FLAG_PRINT_SPINS)
{
printf("\n");
for(int i=0;i<BLOCK_SIZE*2;i++)
{
for(int j=0;j<BLOCK_SIZE*2;j++)
{
if(h_S[i*BLOCK_SIZE*2+j]>0) printf("+ ");
else printf("- ");
}
printf("\n");
}
}
//Stop and destroy timer
printf("\n --------------------------------- CPU --------------------------------- \n");
/* //toby
CUDA_CHECK_ERROR(cudaThreadSynchronize());
CUDA_CHECK_ERROR(cutStopTimer(timer));
float cpu_sum=cutGetTimerValue(timer);
printf(" Total processing time on CPU: %f (ms) \n",cpu_sum);
CUDA_CHECK_ERROR(cutDeleteTimer(timer));*/
//rob
float cpu_sum = stopwatch.stop();
printf(" Total processing time on CPU (StopWatch): %f (ms) \n",cpu_sum);
printf("\n Speedup: %fX \n\n",(cpu_sum/gpu_sum));
//Cleaning memory
free(h_T);
free(h_E);
free(h_ref_E);
free(h_random_data);
free(h_S);
free(h_out);
CUDA_CHECK_ERROR(cudaFree(d_random_data));
CUDA_CHECK_ERROR(cudaFree(d_S));
CUDA_CHECK_ERROR(cudaFree(d_out));
CUDA_CHECK_ERROR(cudaFree(ki.R));
CUDA_CHECK_ERROR(cudaFree(ki.S));
CUDA_CHECK_ERROR(cudaFree(ki2d.S0));
CUDA_CHECK_ERROR(cudaFree(ki2d.S1));
CUDA_CHECK_ERROR(cudaFree(ki2d.R));
}
/****
*
* Device function main
*
*/
__global__ void device_function_main(int* S,int* out,int* R,float t,bool flag)
{
//Energy variable
int dH=0;
float exp_dH_4=exp(-(4.0)/t);
float exp_dH_8=exp(-(8.0)/t);
//Allocate shared memory
__shared__ int r[BLOCK_SIZE];
//Load random data
r[threadIdx.x]=R[threadIdx.x+BLOCK_SIZE*blockIdx.x];
__syncthreads();
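//Each thread advances its own 32-bit LCG (Numerical Recipes constants RANDOM_A/RANDOM_B); the factor 4.656612e-10,
//roughly 2^-31, used below maps the signed state into about [-1,1) and fabs() folds it into [0,1)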
//Stencil computation on spins
//No shared memory utilization for S?!?!?
if(flag)
{
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update top left
if(blockIdx.x==0)
{ //Top
if(threadIdx.x==0)
{ //Left
dH=2*S[2*threadIdx.x]*(
S[2*threadIdx.x+1]+
S[2*threadIdx.x-1+2*BLOCK_SIZE]+
S[2*threadIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+N-2*BLOCK_SIZE]);
}
else
{
dH=2*S[2*threadIdx.x]*(
S[2*threadIdx.x+1]+
S[2*threadIdx.x-1]+
S[2*threadIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+N-2*BLOCK_SIZE]);
}
}
else
{
if(threadIdx.x==0)
{ //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-1+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
else
{
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
}
if(dH==4)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4)
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
}
else if(dH==8)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8)
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
}
else
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update bottom right
if(blockIdx.x==BLOCK_SIZE-1)
{ //Bottom
if(threadIdx.x==BLOCK_SIZE-1)
{ //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
else
{
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
}
else
{
if(threadIdx.x==BLOCK_SIZE-1)
{ //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
else
{
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
}
if(dH==4)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4)
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else if(dH==8)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8)
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
__syncthreads();
}
else
{
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update top right
if(blockIdx.x==0)
{ //Top
if(threadIdx.x==BLOCK_SIZE-1)
{ //Right
dH=2*S[2*threadIdx.x+1]*(
S[2*threadIdx.x+2-2*BLOCK_SIZE]+
S[2*threadIdx.x]+
S[2*threadIdx.x+1+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+N-2*BLOCK_SIZE]);
}
else
{
dH=2*S[2*threadIdx.x+1]*(
S[2*threadIdx.x+2]+
S[2*threadIdx.x]+
S[2*threadIdx.x+1+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+N-2*BLOCK_SIZE]);
}
}
else
{
if(threadIdx.x==BLOCK_SIZE-1)
{ //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2-2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
else
{
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
}
if(dH==4)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4)
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
}
else if(dH==8)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8)
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
}
else
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update bottom left
if(blockIdx.x==BLOCK_SIZE-1)
{ //Bottom
if(threadIdx.x==0)
{ //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)-1]+
S[2*threadIdx.x]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
else
{
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE-1]+
S[2*threadIdx.x]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
}
else
{
if(threadIdx.x==0)
{ //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
else
{
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
}
if(dH==4)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4)
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else if(dH==8)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8)
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
//Transfer random data back to global memory
R[threadIdx.x+BLOCK_SIZE*blockIdx.x]=r[threadIdx.x];
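//The updated LCG states persist in global memory so the next kernel launch continues the same random streams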
if(!flag)
{
//For reduction shared memory array r is used
if(FLAG_ENERGY)
{
//Calc energy
if(blockIdx.x==BLOCK_SIZE-1)
{ //Bottom
if(threadIdx.x==BLOCK_SIZE-1)
{ //Right
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1-2*BLOCK_SIZE]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+S[2*threadIdx.x+1]);
}
else
{
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2+2*BLOCK_SIZE]+S[2*threadIdx.x+1]);
}
}
else
{
if(threadIdx.x==BLOCK_SIZE-1)
{ //Right
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1-2*BLOCK_SIZE]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]);
}
else
{
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2+2*BLOCK_SIZE]+S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]);
}
}
__syncthreads();
}
else
{
//Calc magnetisation
dH=S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]
+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]
+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]
+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
__syncthreads();
}
//Save partial results back to shared memory in new structure
r[threadIdx.x]=dH;
//Reduction on GPU
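//Interleaved-addressing tree reduction in shared memory: the stride dx doubles every step and the block sum ends up
//in r[0]; the modulo test causes warp divergence but keeps the code simple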
for(unsigned int dx=1;dx<BLOCK_SIZE;dx*=2)
{
if(threadIdx.x%(2*dx)==0)
{
r[threadIdx.x]+=r[threadIdx.x+dx];
}
__syncthreads();
}
//Save in out
if(threadIdx.x==0) out[blockIdx.x]=r[0];
}
}
__global__ void device_function_main_noreduce(int* S,int* R,float t,bool flag)
{
//Energy variable
int dH=0;
float exp_dH_4=exp(-(4.0)/t);
float exp_dH_8=exp(-(8.0)/t);
//Allocate shared memory
__shared__ int r[BLOCK_SIZE];
//Load random data
r[threadIdx.x]=R[threadIdx.x+BLOCK_SIZE*blockIdx.x];
__syncthreads();
//Stencil computation on spins
//No shared memory utilization for S?!?!?
if(flag)
{
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update top left
if(blockIdx.x==0)
{ //Top
if(threadIdx.x==0)
{ //Left
dH=2*S[2*threadIdx.x]*(
S[2*threadIdx.x+1]+
S[2*threadIdx.x-1+2*BLOCK_SIZE]+
S[2*threadIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+N-2*BLOCK_SIZE]);
}
else
{
dH=2*S[2*threadIdx.x]*(
S[2*threadIdx.x+1]+
S[2*threadIdx.x-1]+
S[2*threadIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+N-2*BLOCK_SIZE]);
}
}
else
{
if(threadIdx.x==0)
{ //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-1+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
else
{
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
}
// int globalIndex = 2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x;
// int ii[4];
// ii[0]=globalIndex-2*BLOCK_SIZE+N;
// ii[1]=globalIndex+1;
// ii[2]=globalIndex+2*BLOCK_SIZE;
// ii[3]=globalIndex-1;
// int i=10;
// if(globalIndex == i)
// {
// printf("Old kernel spins at i=%d: (t,r,b,l): %d,%d,%d,%d,%d,%d,%d,%d\n",
// i,
// S[globalIndex+1],
// S[globalIndex-1],
// S[globalIndex+2*BLOCK_SIZE],
// S[globalIndex-2*BLOCK_SIZE+N],
// ii[0],ii[1],ii[2],ii[3]
// );
// }
if(dH==4)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4)
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
}
else if(dH==8)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8)
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
}
else
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update bottom right
if(blockIdx.x==BLOCK_SIZE-1)
{ //Bottom
if(threadIdx.x==BLOCK_SIZE-1)
{ //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
else
{
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
}
else
{
if(threadIdx.x==BLOCK_SIZE-1)
{ //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
else
{
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
}
if(dH==4)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4)
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else if(dH==8)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8)
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
__syncthreads();
}
else
{
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update top right
if(blockIdx.x==0)
{ //Top
if(threadIdx.x==BLOCK_SIZE-1)
{ //Right
dH=2*S[2*threadIdx.x+1]*(
S[2*threadIdx.x+2-2*BLOCK_SIZE]+
S[2*threadIdx.x]+
S[2*threadIdx.x+1+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+N-2*BLOCK_SIZE]);
}
else
{
dH=2*S[2*threadIdx.x+1]*(
S[2*threadIdx.x+2]+
S[2*threadIdx.x]+
S[2*threadIdx.x+1+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+N-2*BLOCK_SIZE]);
}
}
else
{
if(threadIdx.x==BLOCK_SIZE-1)
{ //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2-2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
else
{
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
}
if(dH==4)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4)
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
}
else if(dH==8)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8)
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
}
else
{
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update bottom left
if(blockIdx.x==BLOCK_SIZE-1)
{ //Bottom
if(threadIdx.x==0)
{ //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)-1]+
S[2*threadIdx.x]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
else
{
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE-1]+
S[2*threadIdx.x]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
}
else
{
if(threadIdx.x==0)
{ //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
else
{
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
}
if(dH==4)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4)
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else if(dH==8)
{
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8)
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else
{
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
//Transfer random data back to global memory
R[threadIdx.x+BLOCK_SIZE*blockIdx.x]=r[threadIdx.x];
}
/****
*
* CPU function
*
*/
void cpu_function(double* E, int* S)
{
int random=23;
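//Serial reference: a single LCG stream using the same RANDOM_A/RANDOM_B constants as the GPU kernels, so CPU and GPU
//spin configurations agree only statistically, not spin-for-spin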
int num_entries=0;
for(double t=T_START;t>=T_END;t=t*T_FACTOR)
{
double avg_H=0;
double exp_dH_4=exp(-(4.0)/t);
double exp_dH_8=exp(-(8.0)/t);
for(int global_iteration=0;global_iteration<GLOBAL_ITERATIONS;++global_iteration)
{
if(FLAG_ENERGY)
{
//Energy
double H=0;
for(int x=0;x<n;++x)
{
for(int y=0;y<n;++y)
{
int xr=x+1,yd=y+1;
if(xr==n) xr=0;
if(yd==n) yd=0;
H+=-S[y*n+x]*(S[y*n+xr]+S[yd*n+x]);
}
}
avg_H+=H/N;
} else {
//Magnetisation
double H=0;
for(int x=0;x<N;++x)
{
H+=S[x];
}
avg_H+=H/N;
}
for(int x=0;x<n;++x)
{
for(int y=0;y<n;++y)
{
if((y*(n+1)+x)%2==0)
{
int xl=x-1,yl=y,xu=x,yu=y-1,xr=x+1,yr=y,xd=x,yd=y+1;
if(x==0)
{
xl=n-1;
}
else if(x==n-1)
{
xr=0;
}
if(y==0)
{
yu=n-1;
}
else if(y==n-1)
{
yd=0;
}
//Initial local energy
int dH=2*S[y*n+x]*(
S[yl*n+xl]+
S[yr*n+xr]+
S[yu*n+xu]+
S[yd*n+xd]
);
if(dH==4)
{
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_4)
{
S[y*n+x]=-S[y*n+x];
}
}
else if(dH==8)
{
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_8)
{
S[y*n+x]=-S[y*n+x];
}
}
else
{
S[y*n+x]=-S[y*n+x];
}
}
}
}
for(int x=0;x<n;++x)
{
for(int y=0;y<n;++y)
{
if((y*(n+1)+x)%2==1)
{
int xl=x-1,yl=y,xu=x,yu=y-1,xr=x+1,yr=y,xd=x,yd=y+1;
if(x==0)
{
xl=n-1;
}
else if(x==n-1)
{
xr=0;
}
if(y==0)
{
yu=n-1;
}
else if(y==n-1)
{
yd=0;
}
//Initial local energy
int dH=2*S[y*n+x]*(
S[yl*n+xl]+
S[yr*n+xr]+
S[yu*n+xu]+
S[yd*n+xd]
);
if(dH==4)
{
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_4)
{
S[y*n+x]=-S[y*n+x];
}
}
else if(dH==8)
{
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_8)
{
S[y*n+x]=-S[y*n+x];
}
}
else
{
S[y*n+x]=-S[y*n+x];
}
}
}
}
}
E[num_entries]=avg_H/GLOBAL_ITERATIONS;
num_entries++;
}
}
void cpu_function_noreduce(double* E, int* S)
{
int random=23;
//int num_entries=0;
for(double t=T_START;t>=T_END;t=t*T_FACTOR)
{
//double avg_H=0;
double exp_dH_4=exp(-(4.0)/t);
double exp_dH_8=exp(-(8.0)/t);
for(int global_iteration=0;global_iteration<GLOBAL_ITERATIONS;++global_iteration)
{
/*if(FLAG_ENERGY)
{
//Energy
double H=0;
for(int x=0;x<n;++x)
{
for(int y=0;y<n;++y)
{
int xr=x+1,yd=y+1;
if(xr==n) xr=0;
if(yd==n) yd=0;
H+=-S[y*n+x]*(S[y*n+xr]+S[yd*n+x]);
}
}
avg_H+=H/N;
} else {
//Magnetisation
double H=0;
for(int x=0;x<N;++x)
{
H+=S[x];
}
avg_H+=H/N;
}*/
for(int x=0;x<n;++x)
{
for(int y=0;y<n;++y)
{
if((y*(n+1)+x)%2==0)
{
int xl=x-1,yl=y,xu=x,yu=y-1,xr=x+1,yr=y,xd=x,yd=y+1;
if(x==0)
{
xl=n-1;
}
else if(x==n-1)
{
xr=0;
}
if(y==0)
{
yu=n-1;
}
else if(y==n-1)
{
yd=0;
}
//Initial local energy
int dH=2*S[y*n+x]*(
S[yl*n+xl]+
S[yr*n+xr]+
S[yu*n+xu]+
S[yd*n+xd]
);
if(dH==4)
{
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_4)
{
S[y*n+x]=-S[y*n+x];
}
}
else if(dH==8)
{
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_8)
{
S[y*n+x]=-S[y*n+x];
}
}
else
{
S[y*n+x]=-S[y*n+x];
}
}
}
}
for(int x=0;x<n;++x)
{
for(int y=0;y<n;++y)
{
if((y*(n+1)+x)%2==1)
{
int xl=x-1,yl=y,xu=x,yu=y-1,xr=x+1,yr=y,xd=x,yd=y+1;
if(x==0)
{
xl=n-1;
}
else if(x==n-1)
{
xr=0;
}
if(y==0)
{
yu=n-1;
}
else if(y==n-1)
{
yd=0;
}
//Initial local energy
int dH=2*S[y*n+x]*(
S[yl*n+xl]+
S[yr*n+xr]+
S[yu*n+xu]+
S[yd*n+xd]
);
if(dH==4)
{
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_4)
{
S[y*n+x]=-S[y*n+x];
}
}
else if(dH==8)
{
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_8)
{
S[y*n+x]=-S[y*n+x];
}
}
else
{
S[y*n+x]=-S[y*n+x];
}
}
}
}
}
//E[num_entries]=avg_H/GLOBAL_ITERATIONS;
//num_entries++;
}
}
|
b1387d4aca6a4ac001e20edb62d47debff0241af.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ConditionCFLKernel1D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *Rsup = NULL;
hipMalloc(&Rsup, XSIZE*YSIZE);
double *Rinf = NULL;
hipMalloc(&Rinf, XSIZE*YSIZE);
double *Rmed = NULL;
hipMalloc(&Rmed, XSIZE*YSIZE);
int nrad = 1;
int nsec = 1;
double *Vtheta = NULL;
hipMalloc(&Vtheta, XSIZE*YSIZE);
double *Vmoy = NULL;
hipMalloc(&Vmoy, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
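// XSIZE/YSIZE are rounded up to the next multiple of the block dimensions so the grid below fully tiles the matrix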
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(ConditionCFLKernel1D, dim3(gridBlock), dim3(threadBlock), 0, 0, Rsup, Rinf, Rmed, nrad, nsec, Vtheta, Vmoy);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(ConditionCFLKernel1D, dim3(gridBlock), dim3(threadBlock), 0, 0, Rsup, Rinf, Rmed, nrad, nsec, Vtheta, Vmoy);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(ConditionCFLKernel1D, dim3(gridBlock), dim3(threadBlock), 0, 0, Rsup, Rinf, Rmed, nrad, nsec, Vtheta, Vmoy);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
b1387d4aca6a4ac001e20edb62d47debff0241af.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ConditionCFLKernel1D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *Rsup = NULL;
cudaMalloc(&Rsup, XSIZE*YSIZE);
double *Rinf = NULL;
cudaMalloc(&Rinf, XSIZE*YSIZE);
double *Rmed = NULL;
cudaMalloc(&Rmed, XSIZE*YSIZE);
int nrad = 1;
int nsec = 1;
double *Vtheta = NULL;
cudaMalloc(&Vtheta, XSIZE*YSIZE);
double *Vmoy = NULL;
cudaMalloc(&Vmoy, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
ConditionCFLKernel1D<<<gridBlock,threadBlock>>>(Rsup,Rinf,Rmed,nrad,nsec,Vtheta,Vmoy);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ConditionCFLKernel1D<<<gridBlock,threadBlock>>>(Rsup,Rinf,Rmed,nrad,nsec,Vtheta,Vmoy);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ConditionCFLKernel1D<<<gridBlock,threadBlock>>>(Rsup,Rinf,Rmed,nrad,nsec,Vtheta,Vmoy);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
a36ca1c8c06c7ef7a274283e8fa0d7e6586e8d1f.hip
|
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
|
a36ca1c8c06c7ef7a274283e8fa0d7e6586e8d1f.cu
|
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
a9d79d8f2e57e3b28dbd749a51347273bbb5cd79.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/local/cuda-convnet2/filter_acts/filter_act_sparse2_y4x32i4f16c4_tex.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
/**
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* --------------------------------------------------------------------------
* * This file has been modified by Megvii ("Megvii Modifications").
* * All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
* --------------------------------------------------------------------------
*/
#include "filter_act_templates.cuh"
namespace megdnn {
namespace cuda {
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache,
bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex (FILTER_ACTS_PARAMS) {
__shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters
__shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images
fill_shared_mem<float>((float *)shFilters, sizeof(shFilters)/sizeof(float), 0);
fill_shared_mem<float>((float *)shImages, sizeof(shImages)/sizeof(float), 0);
__syncthreads();
const int imgPixels = imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize;
const int numFilterColors = numImgColors / numGroups;
const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule);
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numModules = numModulesX * numModulesY;
const int blockColorIdx = numFilterColors * blockGroupIdx;
 // Another fun insanity: the % B_X makes things faster, even though threadIdx.x is
// in the range 0..31. It appears that this allows the compiler to optimize?
const int tx = threadIdx.x % B_X;
const int ty = threadIdx.y % B_Y;
const int tidx = ty * B_X + threadIdx.x;
const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;
const int imgOffset = (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx;
// images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx;
const int filterOffset = blockFilterIdx
+ shFilterLoadY * numFilters * filterPixels + shFilterLoadX + (conv ? 0 : moduleIdx * numFilterColors * filterPixels * numFilters);
// filters +=blockFilterIdx
// + shFilterLoadY * numFilters * filterPixels + shFilterLoadX;
// if (!conv) {
// filters += moduleIdx * numFilterColors * filterPixels * numFilters;
// }
targets += moduleIdx * numImages
+ (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules
+ myImgIdx;
float prod[imgsPerThread][filtersPerThread];
// float fCache[filtersPerThread];
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] = 0;
}
}
// NOTE: these max/min functions increase register usage as compared to my macros
const int imgStartX = max(0, imgLoadModPosX);
const int imgStartY = max(0, imgLoadModPosY);
const int imgEndX = min(imgLoadModPosX + filterSize, imgSizeX);
const int imgEndY = min(imgLoadModPosY + filterSize, imgSizeY);
// __shared__ int imgPos[]
int fPidx, iPidx;
float imPreload[imgsPerThread]; // [4]
float fPreload[colorCache*filtersPerThread/B_X]; // [2]
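 // imPreload/fPreload implement a software pipeline: the next pixel's image and filter values are fetched into
 // registers through the texture path while the current shared-memory tile is being consumed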
// float fCache[filtersPerThread];
filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgStartY, imgStartX, fPidx, iPidx);
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
imPreload[i] = tex1Dfetch<float>(images, imgOffset + imgStride * iPidx + i * B_X);
} else {
imPreload[i] = 0;
}
}
if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) { // This if statement reduces reg usage..
#pragma unroll
for (int c = 0; c < colorCache; c += B_X/filtersPerThread) {
fPreload[c*filtersPerThread/B_X] = tex1Dfetch<float>(filters, filterOffset + (c * filterPixels + fPidx) * numFilters);
}
}
for (int imgY = imgStartY; imgY < imgEndY; ++imgY) {
// const int filterPxY = imgY - imgLoadModPosY;
for (int imgX = imgStartX; imgX < imgEndX; ++imgX) {
// const int filterPxX = imgX - imgLoadModPosX;
// const int p = filterPxY * filterSize + filterPxX;
// const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img
// setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgY, imgX, &p, &pixIdx);
// float* m = &images[imgStride * pixIdx];
const bool lastPixel = imgY == imgEndY - 1 && imgX == imgEndX - 1;
int imgYNext = imgY;
int imgXNext = imgX;
int fPidxNext, iPidxNext;
if (!lastPixel) {
imgYNext = imgY + (imgX + 1 == imgEndX);
imgXNext = imgX + 1 == imgEndX ? imgStartX : imgX + 1;
}
filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgYNext, imgXNext, fPidxNext, iPidxNext);
for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop)
// const float* ff = &filters[numFilters * ((oc + colorCache) * filterPixels + fPidx)];
// const float* mm = &images[imgStride * ((oc + colorCache) * imgPixels + iPidx)];
int imgOffset2 = imgOffset + imgStride * ((oc + colorCache) * imgPixels + iPidx);
int filterOffset2 = filterOffset + numFilters * ((oc + colorCache) * filterPixels + fPidx);
if (oc == numFilterColors - colorCache) {
filterOffset2 = filterOffset + fPidxNext * numFilters;
imgOffset2 = imgOffset + iPidxNext * imgStride;
fPidx = fPidxNext;
iPidx = iPidxNext;
}
#pragma unroll
for (int c = 0; c < colorCache; c += B_X/filtersPerThread) {
shFilters[c + shFilterLoadY][shFilterLoadX] = fPreload[c*filtersPerThread/B_X];
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
// NOTE: bank conflicts here!
shImages[ty][tx * imgsPerThread + i] = imPreload[i];
}
imPreload[0] = (checkImgBounds && myImgIdx + 0 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 0 * B_X);
imPreload[1] = (checkImgBounds && myImgIdx + 1 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 1 * B_X);
imPreload[2] = (checkImgBounds && myImgIdx + 2 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 2 * B_X);
__syncthreads();
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[0][threadIdx.x * imgsPerThread + i] * shFilters[0][threadIdx.y * filtersPerThread + f];
}
}
fPreload[0] = tex1Dfetch<float>(filters, filterOffset2 + 0);
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[1][threadIdx.x * imgsPerThread + i] * shFilters[1][threadIdx.y * filtersPerThread + f];
}
}
fPreload[1] = tex1Dfetch<float>(filters, filterOffset2 + (B_X/filtersPerThread * filterPixels) * numFilters);
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[2][threadIdx.x * imgsPerThread + i] * shFilters[2][threadIdx.y * filtersPerThread + f];
}
}
imPreload[3] = (checkImgBounds && myImgIdx + 3 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 3 * B_X);
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[3][threadIdx.x * imgsPerThread + i] * shFilters[3][threadIdx.y * filtersPerThread + f];
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f];
}
}
}
} else {
// Note: reversing order of these loops saves 2 registers, but costs time
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f];
}
}
}
}
}
template __global__ void
filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex
< 4, 32, 4, 16, 4, false, false >(FILTER_ACTS_PARAMS);
template __global__ void
filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex
< 4, 32, 4, 16, 4, true, false >(FILTER_ACTS_PARAMS);
} // namespace cuda
} // namespace megdnn
|
a9d79d8f2e57e3b28dbd749a51347273bbb5cd79.cu
|
/**
* \file dnn/src/cuda/local/cuda-convnet2/filter_acts/filter_act_sparse2_y4x32i4f16c4_tex.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
/**
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* --------------------------------------------------------------------------
* * This file has been modified by Megvii ("Megvii Modifications").
* * All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
* --------------------------------------------------------------------------
*/
#include "filter_act_templates.cuh"
namespace megdnn {
namespace cuda {
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache,
bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex (FILTER_ACTS_PARAMS) {
__shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters
__shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images
fill_shared_mem<float>((float *)shFilters, sizeof(shFilters)/sizeof(float), 0);
fill_shared_mem<float>((float *)shImages, sizeof(shImages)/sizeof(float), 0);
__syncthreads();
const int imgPixels = imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize;
const int numFilterColors = numImgColors / numGroups;
const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule);
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numModules = numModulesX * numModulesY;
const int blockColorIdx = numFilterColors * blockGroupIdx;
 // Another fun insanity: the % B_X makes things faster, even though threadIdx.x is
// in the range 0..31. It appears that this allows the compiler to optimize?
const int tx = threadIdx.x % B_X;
const int ty = threadIdx.y % B_Y;
const int tidx = ty * B_X + threadIdx.x;
const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;
const int imgOffset = (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx;
// images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx;
const int filterOffset = blockFilterIdx
+ shFilterLoadY * numFilters * filterPixels + shFilterLoadX + (conv ? 0 : moduleIdx * numFilterColors * filterPixels * numFilters);
// filters +=blockFilterIdx
// + shFilterLoadY * numFilters * filterPixels + shFilterLoadX;
// if (!conv) {
// filters += moduleIdx * numFilterColors * filterPixels * numFilters;
// }
targets += moduleIdx * numImages
+ (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules
+ myImgIdx;
float prod[imgsPerThread][filtersPerThread];
// float fCache[filtersPerThread];
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] = 0;
}
}
// NOTE: these max/min functions increase register usage as compared to my macros
const int imgStartX = max(0, imgLoadModPosX);
const int imgStartY = max(0, imgLoadModPosY);
const int imgEndX = min(imgLoadModPosX + filterSize, imgSizeX);
const int imgEndY = min(imgLoadModPosY + filterSize, imgSizeY);
// __shared__ int imgPos[]
int fPidx, iPidx;
float imPreload[imgsPerThread]; // [4]
float fPreload[colorCache*filtersPerThread/B_X]; // [2]
// float fCache[filtersPerThread];
filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgStartY, imgStartX, fPidx, iPidx);
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
imPreload[i] = tex1Dfetch<float>(images, imgOffset + imgStride * iPidx + i * B_X);
} else {
imPreload[i] = 0;
}
}
if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) { // This if statement reduces reg usage..
#pragma unroll
for (int c = 0; c < colorCache; c += B_X/filtersPerThread) {
fPreload[c*filtersPerThread/B_X] = tex1Dfetch<float>(filters, filterOffset + (c * filterPixels + fPidx) * numFilters);
}
}
for (int imgY = imgStartY; imgY < imgEndY; ++imgY) {
// const int filterPxY = imgY - imgLoadModPosY;
for (int imgX = imgStartX; imgX < imgEndX; ++imgX) {
// const int filterPxX = imgX - imgLoadModPosX;
// const int p = filterPxY * filterSize + filterPxX;
// const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img
// setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgY, imgX, &p, &pixIdx);
// float* m = &images[imgStride * pixIdx];
const bool lastPixel = imgY == imgEndY - 1 && imgX == imgEndX - 1;
int imgYNext = imgY;
int imgXNext = imgX;
int fPidxNext, iPidxNext;
if (!lastPixel) {
imgYNext = imgY + (imgX + 1 == imgEndX);
imgXNext = imgX + 1 == imgEndX ? imgStartX : imgX + 1;
}
filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgYNext, imgXNext, fPidxNext, iPidxNext);
for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop)
// const float* ff = &filters[numFilters * ((oc + colorCache) * filterPixels + fPidx)];
// const float* mm = &images[imgStride * ((oc + colorCache) * imgPixels + iPidx)];
int imgOffset2 = imgOffset + imgStride * ((oc + colorCache) * imgPixels + iPidx);
int filterOffset2 = filterOffset + numFilters * ((oc + colorCache) * filterPixels + fPidx);
if (oc == numFilterColors - colorCache) {
filterOffset2 = filterOffset + fPidxNext * numFilters;
imgOffset2 = imgOffset + iPidxNext * imgStride;
fPidx = fPidxNext;
iPidx = iPidxNext;
}
#pragma unroll
for (int c = 0; c < colorCache; c += B_X/filtersPerThread) {
shFilters[c + shFilterLoadY][shFilterLoadX] = fPreload[c*filtersPerThread/B_X];
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
// NOTE: bank conflicts here!
shImages[ty][tx * imgsPerThread + i] = imPreload[i];
}
imPreload[0] = (checkImgBounds && myImgIdx + 0 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 0 * B_X);
imPreload[1] = (checkImgBounds && myImgIdx + 1 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 1 * B_X);
imPreload[2] = (checkImgBounds && myImgIdx + 2 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 2 * B_X);
__syncthreads();
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[0][threadIdx.x * imgsPerThread + i] * shFilters[0][threadIdx.y * filtersPerThread + f];
}
}
fPreload[0] = tex1Dfetch<float>(filters, filterOffset2 + 0);
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[1][threadIdx.x * imgsPerThread + i] * shFilters[1][threadIdx.y * filtersPerThread + f];
}
}
fPreload[1] = tex1Dfetch<float>(filters, filterOffset2 + (B_X/filtersPerThread * filterPixels) * numFilters);
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[2][threadIdx.x * imgsPerThread + i] * shFilters[2][threadIdx.y * filtersPerThread + f];
}
}
imPreload[3] = (checkImgBounds && myImgIdx + 3 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 3 * B_X);
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[3][threadIdx.x * imgsPerThread + i] * shFilters[3][threadIdx.y * filtersPerThread + f];
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f];
}
}
}
} else {
// Note: reversing order of these loops saves 2 registers, but costs time
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f];
}
}
}
}
}
template __global__ void
filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex
< 4, 32, 4, 16, 4, false, false >(FILTER_ACTS_PARAMS);
template __global__ void
filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex
< 4, 32, 4, 16, 4, true, false >(FILTER_ACTS_PARAMS);
} // namespace cuda
} // namespace megdnn
|
b1e0e70f26e9bdc10491d31631fa55bc669f4fe7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
inline __device__ void planes_voxels_mapping(
float * voxel_grid,
int * ray_voxel_indices,
int * ray_voxel_count,
float * ray_start,
float * ray_end,
float * S,
float * S_new
) {
// Declare some variables
int M = ray_voxel_count[0]; // The number of voxels for this ray
float sum = 0.0;
float eps = 1e-4;
// Compute the ray
float ray[3];
for (int i=0; i<3; i++) {
ray[i] = ray_end[i] - ray_start[i];
// printf("%f %f\\n", ray[i]);
}
float ray_norm = 0.0;
for (int i=0; i<3; i++) {
ray_norm += ray[i] * ray[i];
}
// Declare some variables
float vd, t, left_d, right_d, coeff_1, coeff_2;
float start = 0.0;
float end = 1.0;
float step = (end - start) / ($depth_planes - 1);
int left=0, right=1;
int idx_x, idx_y, idx_z;
int dim_x = 3*$grid_y*$grid_z;
int dim_y = 3*$grid_z;
int dim_z = 3;
float srsum = 0.0;
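// Loop over the voxels crossed by this ray; the voxel indices are assumed to be ordered
// along the ray, so the left/right depth-plane indices below only ever move forward.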
for (int i=0; i<M; i++) {
// Compute the dot product of the ray with the voxel directions in
// order to project the voxel centers on the ray
sum = 0.0;
idx_x = ray_voxel_indices[3*i];
idx_y = ray_voxel_indices[3*i + 1];
idx_z = ray_voxel_indices[3*i + 2];
//printf("%f-%f-%f \\n", voxel_grid[0], voxel_grid[1], voxel_grid[2]);
for (int j=0; j<3; j++) {
// Compute the directions of the voxel centers in this axis
vd = voxel_grid[idx_x*dim_x + idx_y*dim_y + idx_z*dim_z + j];
vd -= ray_start[j];
sum += ray[j] * vd;
}
// Update the value and make sure that t is between 0 and 1
t = clamp(
sum / ray_norm,
eps,
1-eps
);
// For every voxel center find the two closest depth planes
left_d = t - (start + left*step);
right_d = t - (start + right*step);
while (left_d > 0 && right_d > 0) {
left++;
right++;
left_d = t - (start + left*step);
right_d = t - (start + right*step);
}
left_d = abs(left_d);
right_d = abs(right_d);
// Compute the interpolation coefficients
coeff_1 = 1.0 - (left_d / (left_d + right_d));
coeff_2 = 1.0 - (right_d / (left_d + right_d));
S_new[i] = coeff_1 * S[left] + coeff_2 * S[right];
srsum += S_new[i];
}
// Normalize the output depth distribution before exiting
for (int i=0; i<M; i++) {
S_new[i] = S_new[i] / srsum;
}
}
__global__ void batch_planes_voxels_mapping(
int n_rays,
float * voxel_grid,
int * ray_voxel_indices,
int * ray_voxel_count,
float * ray_start,
float * ray_end,
float * S,
float * S_new
) {
// Compute the ray that this thread is going to be computing stuff for
int r = threadIdx.x + blockDim.x * blockIdx.x;
if (r >= n_rays)
return;
planes_voxels_mapping(
voxel_grid,
ray_voxel_indices + 3*$max_voxels*r,
ray_voxel_count + r,
ray_start + 3*r,
ray_end + 3*r,
S + r*$depth_planes,
S_new + $max_voxels*r
);
}
|
b1e0e70f26e9bdc10491d31631fa55bc669f4fe7.cu
|
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
inline __device__ void planes_voxels_mapping(
float * voxel_grid,
int * ray_voxel_indices,
int * ray_voxel_count,
float * ray_start,
float * ray_end,
float * S,
float * S_new
) {
// Declare some variables
int M = ray_voxel_count[0]; // The number of voxels for this ray
float sum = 0.0;
float eps = 1e-4;
// Compute the ray
float ray[3];
for (int i=0; i<3; i++) {
ray[i] = ray_end[i] - ray_start[i];
// printf("%f %f\\n", ray[i]);
}
float ray_norm = 0.0;
for (int i=0; i<3; i++) {
ray_norm += ray[i] * ray[i];
}
// Declare some variables
float vd, t, left_d, right_d, coeff_1, coeff_2;
float start = 0.0;
float end = 1.0;
float step = (end - start) / ($depth_planes - 1);
int left=0, right=1;
int idx_x, idx_y, idx_z;
int dim_x = 3*$grid_y*$grid_z;
int dim_y = 3*$grid_z;
int dim_z = 3;
float srsum = 0.0;
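// Loop over the voxels crossed by this ray; the voxel indices are assumed to be ordered
// along the ray, so the left/right depth-plane indices below only ever move forward.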
for (int i=0; i<M; i++) {
// Compute the dot product of the ray with the voxel directions in
// order to project the voxel centers on the ray
sum = 0.0;
idx_x = ray_voxel_indices[3*i];
idx_y = ray_voxel_indices[3*i + 1];
idx_z = ray_voxel_indices[3*i + 2];
//printf("%f-%f-%f \\n", voxel_grid[0], voxel_grid[1], voxel_grid[2]);
for (int j=0; j<3; j++) {
// Compute the directions of the voxel centers in this axis
vd = voxel_grid[idx_x*dim_x + idx_y*dim_y + idx_z*dim_z + j];
vd -= ray_start[j];
sum += ray[j] * vd;
}
// Update the value and make sure that t is between 0 and 1
t = clamp(
sum / ray_norm,
eps,
1-eps
);
// For every voxel center find the two closest depth planes
left_d = t - (start + left*step);
right_d = t - (start + right*step);
while (left_d > 0 && right_d > 0) {
left++;
right++;
left_d = t - (start + left*step);
right_d = t - (start + right*step);
}
left_d = abs(left_d);
right_d = abs(right_d);
// Compute the interpolation coefficients
coeff_1 = 1.0 - (left_d / (left_d + right_d));
coeff_2 = 1.0 - (right_d / (left_d + right_d));
S_new[i] = coeff_1 * S[left] + coeff_2 * S[right];
srsum += S_new[i];
}
// Normalize the output depth distribution before exiting
for (int i=0; i<M; i++) {
S_new[i] = S_new[i] / srsum;
}
}
__global__ void batch_planes_voxels_mapping(
int n_rays,
float * voxel_grid,
int * ray_voxel_indices,
int * ray_voxel_count,
float * ray_start,
float * ray_end,
float * S,
float * S_new
) {
// Compute the ray that this thread is going to be computing stuff for
int r = threadIdx.x + blockDim.x * blockIdx.x;
if (r >= n_rays)
return;
planes_voxels_mapping(
voxel_grid,
ray_voxel_indices + 3*$max_voxels*r,
ray_voxel_count + r,
ray_start + 3*r,
ray_end + 3*r,
S + r*$depth_planes,
S_new + $max_voxels*r
);
}
|
b5857dc37a93bddb5b1582ea0e49d1dbf6d23a2b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <math.h>
// to build on Titan V:
// nvcc -arch=sm_70 --ptxas-options=-v -o vanilladeriv vanilladeriv.cu;
#ifdef USE_DOUBLE
#define dfloat double
#else
#define dfloat float
#endif
#ifndef POLYNOMIAL_ORDER
#define POLYNOMIAL_ORDER 4
#endif
template <int Nq, int Np>
__global__ void volumerhs(dfloat * __restrict__ rhs,
const dfloat * __restrict__ Q,
const int nelem){
__shared__ dfloat s_F[Nq][Nq];
dfloat r_rhsR[Nq];
int e = blockIdx.x;
int j = threadIdx.y;
int i = threadIdx.x;
#pragma unroll Nq
for(int k=0;k<Nq;++k) r_rhsR[k] = 0;
#pragma unroll Nq
for(int k=0;k<Nq;++k){
__syncthreads();
int qid = i + j*Nq + k*Nq*Nq + e*Np;
s_F[i][j] = Q[qid];
__syncthreads();
#pragma unroll Nq
for(int n=0;n<Nq;++n){
r_rhsR[k] += s_F[n][j];
r_rhsR[k] += s_F[n][i];
r_rhsR[k] += s_F[j][n];
r_rhsR[k] += s_F[i][n];
}
}
#pragma unroll Nq
for(int k=0;k<Nq;++k){
int qid = i + j*Nq + k*Nq*Nq + e*Np;
rhs[qid] += r_rhsR[k];
}
}
void randArray(int N, dfloat base, dfloat range, dfloat **q, dfloat **c_q){
*q = (dfloat*) calloc(N, sizeof(dfloat));
hipMalloc(c_q, N*sizeof(dfloat));
for(int n=0;n<N;++n){
q[0][n] = base + drand48()*range;
}
hipMemcpy(c_q[0], q[0], N*sizeof(dfloat), hipMemcpyHostToDevice);
}
int main(int argc, char **argv){
srand48(1234);
const int N = POLYNOMIAL_ORDER;
const int nelem = 4000;
const int Nq = N+1;
const int Np = Nq*Nq*Nq;
const int Ntotal = Np*nelem;
dfloat *Q, *c_Q;
randArray(Ntotal, 0., 1., &Q, &c_Q);
hipMemcpy(c_Q, Q, nelem*Np*sizeof(dfloat), hipMemcpyHostToDevice);
dfloat *rhs, *c_rhs;
srand48(1234);
randArray(Ntotal, 1., 1., &rhs, &c_rhs);
dim3 G(nelem,1,1);
dim3 B2(Nq,Nq,Nq);
dim3 B3(Nq,Nq,1);
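// Launch one block per element with an Nq x Nq slab of threads (B3); each thread marches
// over the third (k) dimension in registers. B2 (a full Nq^3 block) is defined but unused here.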
hipLaunchKernelGGL(( volumerhs<Nq, Np>) , dim3(G), dim3(B3) , 0, 0, c_rhs, c_Q, nelem);
hipDeviceSynchronize();
exit(0);
return 0;
}
|
b5857dc37a93bddb5b1582ea0e49d1dbf6d23a2b.cu
|
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <math.h>
// to build on Titan V:
// nvcc -arch=sm_70 --ptxas-options=-v -o vanilladeriv vanilladeriv.cu;
#ifdef USE_DOUBLE
#define dfloat double
#else
#define dfloat float
#endif
#ifndef POLYNOMIAL_ORDER
#define POLYNOMIAL_ORDER 4
#endif
template <int Nq, int Np>
__global__ void volumerhs(dfloat * __restrict__ rhs,
const dfloat * __restrict__ Q,
const int nelem){
__shared__ dfloat s_F[Nq][Nq];
dfloat r_rhsR[Nq];
int e = blockIdx.x;
int j = threadIdx.y;
int i = threadIdx.x;
#pragma unroll Nq
for(int k=0;k<Nq;++k) r_rhsR[k] = 0;
#pragma unroll Nq
for(int k=0;k<Nq;++k){
__syncthreads();
int qid = i + j*Nq + k*Nq*Nq + e*Np;
s_F[i][j] = Q[qid];
__syncthreads();
#pragma unroll Nq
for(int n=0;n<Nq;++n){
r_rhsR[k] += s_F[n][j];
r_rhsR[k] += s_F[n][i];
r_rhsR[k] += s_F[j][n];
r_rhsR[k] += s_F[i][n];
}
}
#pragma unroll Nq
for(int k=0;k<Nq;++k){
int qid = i + j*Nq + k*Nq*Nq + e*Np;
rhs[qid] += r_rhsR[k];
}
}
void randArray(int N, dfloat base, dfloat range, dfloat **q, dfloat **c_q){
*q = (dfloat*) calloc(N, sizeof(dfloat));
cudaMalloc(c_q, N*sizeof(dfloat));
for(int n=0;n<N;++n){
q[0][n] = base + drand48()*range;
}
cudaMemcpy(c_q[0], q[0], N*sizeof(dfloat), cudaMemcpyHostToDevice);
}
int main(int argc, char **argv){
srand48(1234);
const int N = POLYNOMIAL_ORDER;
const int nelem = 4000;
const int Nq = N+1;
const int Np = Nq*Nq*Nq;
const int Ntotal = Np*nelem;
dfloat *Q, *c_Q;
randArray(Ntotal, 0., 1., &Q, &c_Q);
cudaMemcpy(c_Q, Q, nelem*Np*sizeof(dfloat), cudaMemcpyHostToDevice);
dfloat *rhs, *c_rhs;
srand48(1234);
randArray(Ntotal, 1., 1., &rhs, &c_rhs);
dim3 G(nelem,1,1);
dim3 B2(Nq,Nq,Nq);
dim3 B3(Nq,Nq,1);
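// Launch one block per element with an Nq x Nq slab of threads (B3); each thread marches
// over the third (k) dimension in registers. B2 (a full Nq^3 block) is defined but unused here.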
volumerhs<Nq, Np> <<< G, B3 >>> (c_rhs, c_Q, nelem);
cudaDeviceSynchronize();
exit(0);
return 0;
}
|
cb6c31bb5b0a3170b7e39ff436a956011dbded6d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "initializer.h"
#include "accessor.h"
#include "model.h"
#include "cuda_helper.h"
#include <hiprand/hiprand.h>
#include <random>
#include <ctime>
void UniformInitializer::init_task(const Task* task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
assert(regions.size() == task->regions.size());
UniformInitializer* initializer = (UniformInitializer*) task->args;
hiprandGenerator_t gen;
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
hiprandSetStream(gen, stream);
//fprintf(stderr, "seed = %d\n", initializer->seed);
for (size_t i = 0; i < regions.size(); i++) {
Domain domain = runtime->get_index_space_domain(
ctx, task->regions[i].region.get_index_space());
float* w;
switch (domain.get_dim()) {
case 0:
{
// Do not support 0-dim parameters
assert(false);
break;
}
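// DIMFUNC generates one switch case per tensor dimensionality; LEGION_FOREACH_N below
// instantiates it for each supported dimension count.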
#define DIMFUNC(DIM) \
case DIM: \
{ \
TensorAccessorW<float, DIM> accW( \
regions[i], task->regions[i], FID_DATA, ctx, runtime, false/*readOutput*/); \
w = accW.ptr; \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
assert(false);
break;
}
}
hiprandSetPseudoRandomGeneratorSeed(gen, initializer->seed);
checkCUDA(hiprandGenerateUniform(gen, w, domain.get_volume()));
hipLaunchKernelGGL(( scale_kernel), dim3(GET_BLOCKS(domain.get_volume())), dim3(CUDA_NUM_THREADS), 0, 0,
w, domain.get_volume(), initializer->min_val, initializer->max_val);
}
checkCUDA(hipDeviceSynchronize());
hiprandDestroyGenerator(gen);
}
void GlorotUniform::init_task(const Task* task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
assert(regions.size() == 1);
assert(task->regions.size() == 1);
Domain domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
float* w;
float scale = 0;
switch (domain.get_dim()) {
case 2:
{
TensorAccessorW<float, 2> accW(regions[0], task->regions[0],
FID_DATA, ctx, runtime, false/*readOutput*/);
w = accW.ptr;
int outputDim = accW.rect.hi[1] - accW.rect.lo[1] + 1;
int inputDim = accW.rect.hi[0] - accW.rect.lo[0] + 1;
scale = sqrt(6.0 / (inputDim + outputDim));
break;
}
case 3:
{
TensorAccessorW<float, 3> accW(regions[0], task->regions[0],
FID_DATA, ctx, runtime, false/*readOutput*/);
w = accW.ptr;
// reference: tensorflow code for computing fan_in/fan_out
// https://github.com/tensorflow/tensorflow/blob/r2.0/tensorflow/python/ops/init_ops.py#L1415-L1439
int num_dim = domain.get_dim();
coord_t receptive_field_size = 1;
for (int i = 0; i < num_dim - 2; i++)
receptive_field_size *= (accW.rect.hi[i] - accW.rect.lo[i] + 1);
coord_t c_in = accW.rect.hi[num_dim-2] - accW.rect.lo[num_dim-2] + 1;
coord_t c_out = accW.rect.hi[num_dim-1] - accW.rect.lo[num_dim-1] + 1;
coord_t fan_in = c_in * receptive_field_size;
coord_t fan_out = c_out * receptive_field_size;
scale = sqrt(6.0 / (fan_in + fan_out));
break;
}
case 4:
{
TensorAccessorW<float, 4> accW(regions[0], task->regions[0],
FID_DATA, ctx, runtime, false/*readOutput*/);
w = accW.ptr;
// reference: tensorflow code for computing fan_in/fan_out
// https://github.com/tensorflow/tensorflow/blob/r2.0/tensorflow/python/ops/init_ops.py#L1415-L1439
int num_dim = domain.get_dim();
coord_t receptive_field_size = 1;
for (int i = 0; i < num_dim - 2; i++)
receptive_field_size *= (accW.rect.hi[i] - accW.rect.lo[i] + 1);
coord_t c_in = accW.rect.hi[num_dim-2] - accW.rect.lo[num_dim-2] + 1;
coord_t c_out = accW.rect.hi[num_dim-1] - accW.rect.lo[num_dim-1] + 1;
coord_t fan_in = c_in * receptive_field_size;
coord_t fan_out = c_out * receptive_field_size;
scale = sqrt(6.0 / (fan_in + fan_out));
break;
}
default:
assert(false);
}
hiprandGenerator_t gen;
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
#ifndef DISABLE_LEGION_CUDA_HIJACK
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
checkCURAND(hiprandSetStream(gen, stream));
#endif
GlorotUniform* initializer = (GlorotUniform*) task->args;
hiprandSetPseudoRandomGeneratorSeed(gen, initializer->seed);
fprintf(stderr, "seed = %d scale = %.4lf\n", initializer->seed, scale);
checkCUDA(hiprandGenerateUniform(gen, w, domain.get_volume()));
hipLaunchKernelGGL(( scale_kernel), dim3(GET_BLOCKS(domain.get_volume())), dim3(CUDA_NUM_THREADS), 0, 0,
w, domain.get_volume(), -scale, scale);
checkCUDA(hipDeviceSynchronize());
hiprandDestroyGenerator(gen);
}
void NormInitializer::init_task(const Task* task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
assert(regions.size() == 1);
assert(task->regions.size() == 1);
Domain domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
float* w;
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
TensorAccessorW<float, DIM> accW( \
regions[0], task->regions[0], FID_DATA, ctx, runtime, false/*readOutput*/); \
w = accW.ptr; \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
hiprandGenerator_t gen;
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
#ifndef DISABLE_LEGION_CUDA_HIJACK
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
checkCURAND(hiprandSetStream(gen, stream));
#endif
NormInitializer* initializer = (NormInitializer*) task->args;
//fprintf(stderr, "seed = %d\n", initializer->seed);
hiprandSetPseudoRandomGeneratorSeed(gen, initializer->seed);
//fprintf(stderr, "domain.volume() = %zu mean(%.4lf) var(%.4lf)\n",
// domain.get_volume(), initializer->mean, initializer->stddev);
// FIXME: it seems hiprand has an internal bug with volume < 4
// double check this later
if (domain.get_volume() < 4) {
std::default_random_engine generator;
std::normal_distribution<float> distribution(
initializer->mean, initializer->stddev);
float* w_dram = (float*) malloc(domain.get_volume() * sizeof(float));
for (size_t i = 0; i < domain.get_volume(); i++)
w_dram[i] = distribution(generator);
checkCUDA(hipMemcpy(w, w_dram, sizeof(float) * domain.get_volume(),
hipMemcpyHostToDevice));
checkCUDA(hipDeviceSynchronize());
free(w_dram);
} else {
checkCURAND(hiprandGenerateNormal(gen, w, domain.get_volume(),
initializer->mean, initializer->stddev));
checkCUDA(hipDeviceSynchronize());
}
hiprandDestroyGenerator(gen);
}
void ZeroInitializer::init_task(const Task* task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
assert(regions.size() == task->regions.size());
for (size_t i = 0; i < regions.size(); i++) {
Domain domain = runtime->get_index_space_domain(
ctx, task->regions[i].region.get_index_space());
float* w;
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
TensorAccessorW<float, DIM> accW( \
regions[i], task->regions[i], FID_DATA, ctx, runtime, false/*readOutput*/); \
w = accW.ptr; \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
assert(false);
break;
}
}
hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(domain.get_volume())), dim3(CUDA_NUM_THREADS), 0, 0,
w, domain.get_volume(), 0.0f);
}
checkCUDA(hipDeviceSynchronize());
}
void ConstantInitializer::init_task(const Task* task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
ConstantInitializer* initializer = (ConstantInitializer*) task->args;
assert(regions.size() == task->regions.size());
for (size_t i = 0; i < regions.size(); i++) {
Domain domain = runtime->get_index_space_domain(
ctx, task->regions[i].region.get_index_space());
float* w;
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
TensorAccessorW<float, DIM> accW( \
regions[i], task->regions[i], FID_DATA, ctx, runtime, false/*readOutput*/); \
w = accW.ptr; \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
assert(false);
break;
}
}
hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(domain.get_volume())), dim3(CUDA_NUM_THREADS), 0, 0,
w, domain.get_volume(), initializer->value);
}
checkCUDA(hipDeviceSynchronize());
}
|
cb6c31bb5b0a3170b7e39ff436a956011dbded6d.cu
|
/* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "initializer.h"
#include "accessor.h"
#include "model.h"
#include "cuda_helper.h"
#include <curand.h>
#include <random>
#include <ctime>
void UniformInitializer::init_task(const Task* task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
assert(regions.size() == task->regions.size());
UniformInitializer* initializer = (UniformInitializer*) task->args;
curandGenerator_t gen;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
curandSetStream(gen, stream);
//fprintf(stderr, "seed = %d\n", initializer->seed);
for (size_t i = 0; i < regions.size(); i++) {
Domain domain = runtime->get_index_space_domain(
ctx, task->regions[i].region.get_index_space());
float* w;
switch (domain.get_dim()) {
case 0:
{
// Do not support 0-dim parameters
assert(false);
break;
}
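// DIMFUNC generates one switch case per tensor dimensionality; LEGION_FOREACH_N below
// instantiates it for each supported dimension count.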
#define DIMFUNC(DIM) \
case DIM: \
{ \
TensorAccessorW<float, DIM> accW( \
regions[i], task->regions[i], FID_DATA, ctx, runtime, false/*readOutput*/); \
w = accW.ptr; \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
assert(false);
break;
}
}
curandSetPseudoRandomGeneratorSeed(gen, initializer->seed);
checkCUDA(curandGenerateUniform(gen, w, domain.get_volume()));
scale_kernel<<<GET_BLOCKS(domain.get_volume()), CUDA_NUM_THREADS>>>(
w, domain.get_volume(), initializer->min_val, initializer->max_val);
}
checkCUDA(cudaDeviceSynchronize());
curandDestroyGenerator(gen);
}
void GlorotUniform::init_task(const Task* task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
assert(regions.size() == 1);
assert(task->regions.size() == 1);
Domain domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
float* w;
float scale = 0;
switch (domain.get_dim()) {
case 2:
{
TensorAccessorW<float, 2> accW(regions[0], task->regions[0],
FID_DATA, ctx, runtime, false/*readOutput*/);
w = accW.ptr;
int outputDim = accW.rect.hi[1] - accW.rect.lo[1] + 1;
int inputDim = accW.rect.hi[0] - accW.rect.lo[0] + 1;
scale = sqrt(6.0 / (inputDim + outputDim));
break;
}
case 3:
{
TensorAccessorW<float, 3> accW(regions[0], task->regions[0],
FID_DATA, ctx, runtime, false/*readOutput*/);
w = accW.ptr;
// reference: tensorflow code for computing fan_in/fan_out
// https://github.com/tensorflow/tensorflow/blob/r2.0/tensorflow/python/ops/init_ops.py#L1415-L1439
int num_dim = domain.get_dim();
coord_t receptive_field_size = 1;
for (int i = 0; i < num_dim - 2; i++)
receptive_field_size *= (accW.rect.hi[i] - accW.rect.lo[i] + 1);
coord_t c_in = accW.rect.hi[num_dim-2] - accW.rect.lo[num_dim-2] + 1;
coord_t c_out = accW.rect.hi[num_dim-1] - accW.rect.lo[num_dim-1] + 1;
coord_t fan_in = c_in * receptive_field_size;
coord_t fan_out = c_out * receptive_field_size;
scale = sqrt(6.0 / (fan_in + fan_out));
break;
}
case 4:
{
TensorAccessorW<float, 4> accW(regions[0], task->regions[0],
FID_DATA, ctx, runtime, false/*readOutput*/);
w = accW.ptr;
// reference: tensorflow code for computing fan_in/fan_out
// https://github.com/tensorflow/tensorflow/blob/r2.0/tensorflow/python/ops/init_ops.py#L1415-L1439
int num_dim = domain.get_dim();
coord_t receptive_field_size = 1;
for (int i = 0; i < num_dim - 2; i++)
receptive_field_size *= (accW.rect.hi[i] - accW.rect.lo[i] + 1);
coord_t c_in = accW.rect.hi[num_dim-2] - accW.rect.lo[num_dim-2] + 1;
coord_t c_out = accW.rect.hi[num_dim-1] - accW.rect.lo[num_dim-1] + 1;
coord_t fan_in = c_in * receptive_field_size;
coord_t fan_out = c_out * receptive_field_size;
scale = sqrt(6.0 / (fan_in + fan_out));
break;
}
default:
assert(false);
}
curandGenerator_t gen;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
#ifndef DISABLE_LEGION_CUDA_HIJACK
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCURAND(curandSetStream(gen, stream));
#endif
GlorotUniform* initializer = (GlorotUniform*) task->args;
curandSetPseudoRandomGeneratorSeed(gen, initializer->seed);
fprintf(stderr, "seed = %d scale = %.4lf\n", initializer->seed, scale);
checkCUDA(curandGenerateUniform(gen, w, domain.get_volume()));
scale_kernel<<<GET_BLOCKS(domain.get_volume()), CUDA_NUM_THREADS>>>(
w, domain.get_volume(), -scale, scale);
checkCUDA(cudaDeviceSynchronize());
curandDestroyGenerator(gen);
}
void NormInitializer::init_task(const Task* task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
assert(regions.size() == 1);
assert(task->regions.size() == 1);
Domain domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
float* w;
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
TensorAccessorW<float, DIM> accW( \
regions[0], task->regions[0], FID_DATA, ctx, runtime, false/*readOutput*/); \
w = accW.ptr; \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
curandGenerator_t gen;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
#ifndef DISABLE_LEGION_CUDA_HIJACK
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCURAND(curandSetStream(gen, stream));
#endif
NormInitializer* initializer = (NormInitializer*) task->args;
//fprintf(stderr, "seed = %d\n", initializer->seed);
curandSetPseudoRandomGeneratorSeed(gen, initializer->seed);
//fprintf(stderr, "domain.volume() = %zu mean(%.4lf) var(%.4lf)\n",
// domain.get_volume(), initializer->mean, initializer->stddev);
// FIXME: it seems curand has an internal bug with volume < 4
// double check this later
if (domain.get_volume() < 4) {
std::default_random_engine generator;
std::normal_distribution<float> distribution(
initializer->mean, initializer->stddev);
float* w_dram = (float*) malloc(domain.get_volume() * sizeof(float));
for (size_t i = 0; i < domain.get_volume(); i++)
w_dram[i] = distribution(generator);
checkCUDA(cudaMemcpy(w, w_dram, sizeof(float) * domain.get_volume(),
cudaMemcpyHostToDevice));
checkCUDA(cudaDeviceSynchronize());
free(w_dram);
} else {
checkCURAND(curandGenerateNormal(gen, w, domain.get_volume(),
initializer->mean, initializer->stddev));
checkCUDA(cudaDeviceSynchronize());
}
curandDestroyGenerator(gen);
}
void ZeroInitializer::init_task(const Task* task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
assert(regions.size() == task->regions.size());
for (size_t i = 0; i < regions.size(); i++) {
Domain domain = runtime->get_index_space_domain(
ctx, task->regions[i].region.get_index_space());
float* w;
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
TensorAccessorW<float, DIM> accW( \
regions[i], task->regions[i], FID_DATA, ctx, runtime, false/*readOutput*/); \
w = accW.ptr; \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
assert(false);
break;
}
}
assign_kernel<<<GET_BLOCKS(domain.get_volume()), CUDA_NUM_THREADS>>>(
w, domain.get_volume(), 0.0f);
}
checkCUDA(cudaDeviceSynchronize());
}
void ConstantInitializer::init_task(const Task* task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
ConstantInitializer* initializer = (ConstantInitializer*) task->args;
assert(regions.size() == task->regions.size());
for (size_t i = 0; i < regions.size(); i++) {
Domain domain = runtime->get_index_space_domain(
ctx, task->regions[i].region.get_index_space());
float* w;
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
TensorAccessorW<float, DIM> accW( \
regions[i], task->regions[i], FID_DATA, ctx, runtime, false/*readOutput*/); \
w = accW.ptr; \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
assert(false);
break;
}
}
assign_kernel<<<GET_BLOCKS(domain.get_volume()), CUDA_NUM_THREADS>>>(
w, domain.get_volume(), initializer->value);
}
checkCUDA(cudaDeviceSynchronize());
}
|
2a020cb93d06ed482e4a98aa47accd38bbbdf425.hip
|
// !!! This is a file automatically generated by hipify!!!
//STL includes
#include <iostream>
#include <vector>
#include <time.h>
#include <cmath>
#include <chrono>
#include <iomanip>
#include <fstream>
#include "math_constants.h"
//Eigen includes
#include <Eigen/Dense>
#include <Eigen/Sparse>
//Boost
#include "boost/program_options.hpp"
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/path.hpp>
//My own includes
#include "global_params.h"
#include "prev_states.h"
#include "input_file_prep.h"
#include "BZ_CUDA_UTIL.h"
#include "attention_layer.h"
#include "attention_node.h"
#include "decoder_model_wrapper.h"
#include "ensemble_factory.h"
#include "base_layer.h"
#include "NCE.h"
#include "gpu_info_struct.h"
#include "custom_kernels.h"
#include "Hidden_To_Hidden_Layer.h"
#include "LSTM_HH.h"
#include "model.h"
#include "fileHelper.h"
#include "Eigen_Util.h"
#include "model.hpp"
#include "base_layer.hpp"
#include "LSTM.hpp"
#include "softmax.hpp"
#include "Input_To_Hidden_Layer.hpp"
#include "Hidden_To_Hidden_Layer.hpp"
#include "LSTM_HH.hpp"
#include "decoder_model_wrapper.hpp"
#include "ensemble_factory.hpp"
#include "attention_layer.hpp"
#include "attention_node.hpp"
#include "NCE.hpp"
//parse the command line from the user
void command_line_parse(global_params &params,int argc, char **argv) {
//files for keeping the user input
//if not s, 1st source, 2nd target, 3rd output weights name
//if s, 1st target, 2nd output weights name
std::vector<std::string> train_files;
//files for force decoding
//if not s, 1. source input file 2. target input file 3. neural network file name 4. output file name
//if s, 1. target input file 2. neural network file name 3. output file name
std::vector<std::string> test_files;
//stuff for adaptive learning rate schedule
//if not seq , 1st is source dev, 2nd is target dev
//if seq 1st is target dev
std::vector<std::string> adaptive_learning_rate;
//lower and upper range for parameter initialization
std::vector<precision> lower_upper_range;
//for the kbest flag, 4 arguments must be entered for kbest, 1. number of best paths 2. input file name
//3. neural network file name (this is the output file you get after training the neural network)4. output file name
std::vector<std::string> kbest_files;
//for stoic gen, 1st neural network file, 2nd is output file name
std::vector<std::string> stoicgen_files;
//truncated softmax
std::vector<std::string> trunc_info;
//for decoding ratios
std::vector<precision> decoding_ratio;
//for continuing to train
std::vector<std::string> cont_train;
//for multi gpu training
std::vector<int> gpu_indicies;
//basic format setup
namespace po = boost::program_options;
po::options_description desc("Options");
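// Each flag below binds its value directly into the global_params struct (or one of the
// local vectors declared above) via boost::program_options.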
desc.add_options()
("help,h", "Run to get help on how to use the program")
("train,t",po::value<std::vector<std::string> > (&train_files)->multitoken(),"Train a model with input data file(s) and a name for the neural network output file"\
". \nFORMAT (if sequence to sequence): <source file name> <target file name> <neural network output name> "\
" \nFORMAT (if sequence): <target file name> <neural network output name>")
("cont-train,C",po::value<std::vector<std::string>> (&cont_train)->multitoken(),"Resume training of a model (THIS WILL OVERWRITE THE MODEL FILE)\n"\
"FORMAT: (if sequence to sequence): <source file name> <target file name> <neural network file name>\n"\
"FORMAT: (if seq): <target file name> <neural network file name>")
("train-ensemble",po::value<std::string> (¶ms.ensemble_train_file_name),"Train a model with the same integerization mappings as another model. This is needed to doing ensemble decoding\n"\
"FORMAT: <neural network file name>")
("num-layers,N",po::value<int>(¶ms.num_layers),"Set the number of LSTM layers you want for your model\n DEFAULT:1")
("multi-gpu,M",po::value<std::vector<int>> (&gpu_indicies)->multitoken(), "Train the model on multiple gpus.\nFORMAT: <gpu for layer 1> <gpu for layer 2> ... <gpu for softmax>\n"\
"DEFAULT: all layers and softmax lie on gpu 0")
("force-decode,f",po::value<std::vector<std::string> > (&test_files)->multitoken(), "Get per line probability of dataset plus the perplexity\n"\
"FORMAT: (if sequence to sequence): <source file name> <target file name> <trained neural network file name> <output file name>\n"\
"FORMAT: (if sequence): <target file name> <trained neural network file name> <output file name>")
("stoch-gen,g", po::value<std::vector<std::string> > (&stoicgen_files)->multitoken(),"Do random generation for a sequence model, such as a language model\n"\
"FORMAT: <neural network file name> <output file name>")
("stoch-gen-len",po::value<int>(¶ms.sg_length) ,"How many sentences to let stoch-gen run for\n"\
"FORMAT: <num sentences>\n"
"DEFAULT: 100")
("dump-alignments",po::value<bool>(¶ms.attent_params.dump_alignments),"Dump the alignments to a file")
("temperature",po::value<double>(¶ms.temperature) ,"What should the temperature be for the stoch generation"\
"FORMAT: <temperature> where temperature is typically between [0,1]. A lower temperature makes the model output less and less from what it memorized from training\n"\
"DEFAULT: 1")
("sequence,s", "Train model that learns a sequence,such as language modeling. Default model is sequence to sequence model")
("carve_sequence_data",po::value<int>(¶ms.backprop_len),"For sequence models, train the data as if the data was one long sequence. DEAFUL: <false>")
("dropout,d",po::value<precision>(¶ms.dropout_rate),"Use dropout and set the dropout rate. This value is the probability of keeping a node.\nFORMAT: <dropout rate>\n DEFAULT: not used")
("learning-rate,l",po::value<precision>(¶ms.learning_rate),"Set the learning rate\n DEFAULT: 0.7")
("random-seed",po::value<bool>(¶ms.random_seed),"Use a random seed instead of a faxed one\n")
("longest-sent,L",po::value<int>(¶ms.longest_sent),"Set the maximum sentence length for training.\n DEFAULT: 100")
("hiddenstate-size,H",po::value<int>(¶ms.LSTM_size),"Set hiddenstate size \n DEFAULT: 1000")
("UNK-replacement",po::value<int>(¶ms.unk_aligned_width),"Set unk replacement to be true and set the wideth\n FORMAT: <alignment width>")
("truncated-softmax,T",po::value<std::vector<std::string>> (&trunc_info)->multitoken(),"Use truncated softmax\n DEFAULT: not being used\n"\
"FORMAT: <shortlist size> <sampled size>")
("NCE",po::value<int>(¶ms.num_negative_samples),"Use an NCE loss function, specify the number of noise samples you want (these are shared across the minibatch for speed)")
("attention-model",po::value<bool>(¶ms.attent_params.attention_model),"Bool for whether you want to train with the attention mode\n")
("attention-width",po::value<int>(¶ms.attent_params.D),"How many words do you want to look at around the alignment position on one half, default 10\n")
("feed_input",po::value<bool>(¶ms.attent_params.feed_input),"Bool for wether you want feed input for the attention model\n")
("source-vocab,v",po::value<int>(¶ms.source_vocab_size),"Set source vocab size\n DEFAULT: number of unique words in source training corpus")
("target-vocab,V",po::value<int>(¶ms.target_vocab_size),"Set target vocab size\n DEFAULT: number of unique words in target training corpus")
("shuffle",po::value<bool>(¶ms.shuffle),"true if you want to shuffle the train data\n DEFAULT: true")
("parameter-range,P",po::value<std::vector<precision> > (&lower_upper_range)->multitoken(),"parameter initialization range\n"\
"FORMAT: <Lower range value> <Upper range value>\n DEFAULT: -0.08 0.08")
("number-epochs,n",po::value<int>(¶ms.num_epochs),"Set number of epochs\n DEFAULT: 10")
("matrix-clip-gradients,c",po::value<precision>(¶ms.norm_clip),"Set gradient clipping threshold\n DEFAULT: 5")
("ind-clip-gradients,i",po::value<precision>(&BZ_CUDA::ind_norm_clip_thres),"Set gradient clipping threshold for individual elements\n DEFAULT: 0.1")
("whole-clip-gradients,w",po::value<precision>(¶ms.norm_clip),"Set gradient clipping threshold for all gradients\n DEFAULT: 5")
("adaptive-halve-lr,a",po::value<std::vector<std::string>> (&adaptive_learning_rate)->multitoken(),"change the learning rate"\
" when the perplexity on your specified dev set decreases from the previous half epoch by some constant, so "\
" new_learning_rate = constant*old_learning rate, by default the constant is 0.5, but can be set using adaptive-decrease-factor\n"
"FORMAT: (if sequence to sequence): <source dev file name> <target dev file name>\n"\
"FORMAT: (if sequence): <target dev file name>")
("adaptive-decrease-factor,A",po::value<precision>(¶ms.decrease_factor),"To be used with adaptive-halve-lr"\
" it\n DEFAULT: 0.5")
("fixed-halve-lr",po::value<int> (¶ms.epoch_to_start_halving),"Halve the learning rate"\
" after a certain epoch, every half epoch afterwards by a specific amount")
("fixed-halve-lr-full",po::value<int> (¶ms.epoch_to_start_halving_full),"Halve the learning rate"\
" after a certain epoch, every epoch afterwards by a specific amount")
("minibatch-size,m",po::value<int>(¶ms.minibatch_size),"Set minibatch size\n DEFAULT: 128")
("screen-print-rate",po::value<int>(¶ms.screen_print_rate),"Set after how many minibatched you want to print training info to the screen\n DEFAULT: 5")
("HPC-output",po::value<std::string>(¶ms.HPC_output_file_name),"Use if you want to have the terminal output also be put to a" \
"file \n FORMAT: <file name>")
("best-model,B",po::value<std::string>(¶ms.best_model_file_name),"During train have the best model be written to a file\nFORMAT: <output file name>")
("kbest,k",po::value<std::vector<std::string> > (&kbest_files)->multitoken(),"Get k best paths in sequence to sequence model. You can specify more than one model for ensemble decoding\n"\
"FORMAT: <how many paths> <source file name> <neural network file name 1> <neural network file name 2> ... <output file name>")
("beam-size,b",po::value<int>(¶ms.beam_size),"Set beam size for kbest paths\n DEFAULT: 12")
("penalty,p",po::value<precision>(¶ms.penalty),"Set penalty for kbest decoding. The value entered"\
" will be added to the log probability score per target word decoded. This can make the model favor longer sentences for decoding\n DEFAULT: 0")
("print-score",po::value<bool>(¶ms.print_score),"Set if you want to print out the unnormalized log prob for each path "\
"FORMAT: <bool> \nthe bool is 1 if you want to print the score or 0 otherwise.\n DEFAULT: false")
("dec-ratio",po::value<std::vector<precision>>(&decoding_ratio)->multitoken(),"Set the min and max decoding length rations\n"\
"This means that a target decoded sentence must be at least min_dec_ratio*len(source sentence)"\
" and not longer than max_dec_ratio*len(source sentence)\nFORMAT: <min ration> <max ratio>\n"\
"DEFAULT: 0.5, 1.5")
("Dump-LSTM",po::value<std::string>(¶ms.LSTM_dump_file),"Print the output at each timestep from the LSTM\nFORMAT: <output file name>\n"\
"The file lines that are output are the following: 1.input word, embedding 2.Forget gate 3.input gate"\
" 4.c_t 5.output gate 6.h_t 7.probabilities");
po::variables_map vm;
try {
po::store(po::parse_command_line(argc, argv, desc), vm);
po::notify(vm);
//see if the user specified the help flag
if ( vm.count("help") ) {
std::cout << "\n------------------------------\n";
std::cout << "This is Barret Zoph's GPU RNN library\n"
<< "The flags for the command line interface are below\n"
<< "" << "\n";
std::cout << desc << "\n";
exit (EXIT_FAILURE);
}
//error checks to be sure only one of these options is set
if (vm.count("train") && vm.count("kbest")) {
std::cout << "ERROR: you cannot train and get kbest at the same time\n";
exit (EXIT_FAILURE);
}
if (vm.count("train") && vm.count("force-decode")) {
std::cout << "ERROR: you cannot train and force-decode at the same time\n";
exit (EXIT_FAILURE);
}
if (vm.count("force-decode") && vm.count("kbest")) {
std::cout << "ERROR: you cannot force-decode and get kbest at the same time\n";
exit (EXIT_FAILURE);
}
if (!(vm.count("train") || vm.count("force-decode") || vm.count("kbest")||vm.count("stoch-gen") || vm.count("cont-train") )) {
std::cout << "ERROR: you must either train,continue training,get kbest,stoch generate data or force-decode\n";
exit (EXIT_FAILURE);
}
params.longest_sent+=4; //because it is really 4 less
if(vm.count("train") || vm.count("cont-train")) {
//some basic error checks to parameters
if(params.learning_rate<=0) {
std::cout << "ERROR: you cannot have a learning rate <=0\n";
exit (EXIT_FAILURE);
}
if(params.minibatch_size<=0) {
std::cout << "ERROR: you cannot have a minibatch of size <=0\n";
exit (EXIT_FAILURE);
}
if(params.LSTM_size<=0) {
std::cout << "ERROR: you cannot have a hiddenstate of size <=0\n";
exit (EXIT_FAILURE);
}
if(params.source_vocab_size<=0) {
if(params.source_vocab_size!=-1) {
std::cout << "ERROR: you cannot have a source_vocab_size <=0\n";
exit (EXIT_FAILURE);
}
}
if(params.target_vocab_size<=0) {
if(params.target_vocab_size!=-1) {
std::cout << "ERROR: you cannot have a target_vocab_size <=0\n";
exit (EXIT_FAILURE);
}
}
if(params.norm_clip<=0) {
std::cout << "ERROR: you cannot have your norm clip <=0\n";
exit (EXIT_FAILURE);
}
if(params.num_epochs<=0) {
std::cout << "ERROR: you cannot have num_epochs <=0\n";
exit (EXIT_FAILURE);
}
if(vm.count("HPC-output")) {
params.HPC_output = true;
}
if(vm.count("dropout")) {
params.dropout = true;
if(params.dropout_rate < 0 || params.dropout_rate > 1) {
std::cout << "ERROR: dropout rate must be between 0 and 1\n";
exit (EXIT_FAILURE);
}
}
if(vm.count("matrix-clip-gradients")) {
BZ_CUDA::global_clip_flag = false;
params.clip_gradient = true;
BZ_CUDA::individual_grad_clip = false;
}
if(vm.count("whole-clip-gradients")) {
BZ_CUDA::global_clip_flag = true;
params.clip_gradient = false;
BZ_CUDA::individual_grad_clip = false;
}
if(vm.count("ind-clip-gradients")) {
BZ_CUDA::global_clip_flag = false;
params.clip_gradient = false;
BZ_CUDA::individual_grad_clip = true;
}
if(vm.count("NCE")) {
params.NCE = true;
params.softmax = false;
BZ_CUDA::print_partition_function = true;
}
if(vm.count("UNK-replacement")) {
params.unk_replace = true;
}
boost::filesystem::path unique_path = boost::filesystem::unique_path();
std::cout << "Temp directory being created named: " << unique_path.string() << "\n";
boost::filesystem::create_directories(unique_path);
params.unique_dir = unique_path.string();
params.train_file_name = params.unique_dir+"/train.txt";
//number of layers
//error checking is done when initializing model
if(vm.count("multi-gpu")) {
params.gpu_indicies = gpu_indicies;
}
if(vm.count("cont-train")) {
//sequence model
if(vm.count("sequence")) {
if(cont_train.size()!=2) {
std::cout << cont_train.size() << "\n";
std::cout << "ERROR: two arguements to be supplied to the continue train flag\n"\
" 1. train data file name, 2. neural network file name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.attent_params.attention_model = false;
params.target_file_name = cont_train[0];
params.input_weight_file = cont_train[1];
params.output_weight_file = cont_train[1];
params.LM = true;
params.load_model_train = true;
params.load_model_name = params.input_weight_file;
input_file_prep input_helper;
input_helper.integerize_file_LM(params.input_weight_file,params.target_file_name,params.train_file_name,
params.longest_sent,params.minibatch_size,true,params.LSTM_size,params.target_vocab_size,params.num_layers);
}
else {
if(cont_train.size()!=3) {
std::cout << "ERROR: three arguements to be supplied to the continue train flag\n"\
" 1. source train data file name 2. target train data file name 3. neural network file name \n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.LM = false;
params.source_file_name = cont_train[0];
params.target_file_name = cont_train[1];
params.input_weight_file = cont_train[2];
params.output_weight_file = cont_train[2];
params.load_model_train = true;
params.load_model_name = params.input_weight_file;
if(params.source_file_name == params.target_file_name) {
std::cout << "ERROR: do not use the same file for source and target data\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
input_file_prep input_helper;
input_helper.integerize_file_nonLM(params.input_weight_file,params.source_file_name,
params.target_file_name,params.train_file_name,params.longest_sent,params.minibatch_size,params.LSTM_size,
params.source_vocab_size,params.target_vocab_size,params.num_layers);
}
}
else {
if(vm.count("num-layers")) {
if(params.num_layers <=0) {
std::cout << "ERROR: you must have >= 1 layer for your model\n";
exit (EXIT_FAILURE);
}
}
//now create the necessary files
if(vm.count("sequence")) {
if(train_files.size()!=2) {
std::cout << "ERROR: two arguements to be supplied to the train flag"\
" 1. train data file name, 2. neural network output name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.attent_params.attention_model = false;
params.LM = true;
params.target_file_name = train_files[0];
params.output_weight_file = train_files[1];
input_file_prep input_helper;
if(vm.count("train-ensemble")) {
params.ensemble_train = true;
}
//this outputs the train.txt file along with the mappings and first line
bool success=true;
if(!params.ensemble_train) {
if(vm.count("carve_sequence_data")) {
params.carve_data = true;
input_helper.prep_files_train_LM_carve(params.minibatch_size,
params.target_file_name,
params.train_file_name,params.target_vocab_size,
params.output_weight_file,params.LSTM_size,params.num_layers,
params.backprop_len);
}
else {
success = input_helper.prep_files_train_LM(params.minibatch_size,params.longest_sent,
params.target_file_name,
params.train_file_name,params.target_vocab_size,
params.shuffle,params.output_weight_file,params.LSTM_size,params.num_layers);
}
}
else {
success = input_helper.prep_files_train_LM_ensemble(params.minibatch_size,params.longest_sent,
params.target_file_name,
params.train_file_name,params.target_vocab_size,
params.shuffle,params.output_weight_file,params.LSTM_size,params.num_layers,params.ensemble_train_file_name);
}
//clean up if error
if(!success) {
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
else {
//then sequence to sequence model
if(train_files.size()!=3) {
std::cout << train_files.size() <<"\n";
std::cout << "ERROR: three arguements to be supplied to the train flag for the sequence to sequence model\n"\
" 1. source train data file name\n 2. target train data file name \n3. neural network output name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.LM = false;
params.source_file_name = train_files[0];
params.target_file_name = train_files[1];
params.output_weight_file = train_files[2];
if(params.source_file_name == params.target_file_name) {
std::cout << "ERROR: do not use the same file for source and target data\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
//see if ensemble training
if(vm.count("train-ensemble")) {
params.ensemble_train = true;
}
input_file_prep input_helper;
bool success=true;
if(!params.ensemble_train) {
success = input_helper.prep_files_train_nonLM(params.minibatch_size,params.longest_sent,
params.source_file_name,params.target_file_name,
params.train_file_name,params.source_vocab_size,params.target_vocab_size,
params.shuffle,params.output_weight_file,params.LSTM_size,params.num_layers,params.unk_replace,params.unk_aligned_width);
}
else {
success = input_helper.prep_files_train_nonLM_ensemble(params.minibatch_size,params.longest_sent,
params.source_file_name,params.target_file_name,
params.train_file_name,params.source_vocab_size,params.target_vocab_size,
params.shuffle,params.output_weight_file,params.LSTM_size,params.num_layers,params.ensemble_train_file_name);
}
//clean up if error
if(!success) {
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
}
if(vm.count("parameter-range")) {
if(lower_upper_range.size()!=2) {
std::cout << "ERROR: you must have two inputs to parameter-range\n1.lower bound\n2. upper bound\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
BZ_CUDA::lower = lower_upper_range[0];
BZ_CUDA::upper = lower_upper_range[1];
if(BZ_CUDA::lower >= BZ_CUDA::upper) {
std::cout << "ERROR: the lower parameter range cannot be greater than the upper range\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
if(vm.count("fixed-halve-lr-full")) {
params.stanford_learning_rate = true;
}
if(vm.count("fixed-halve-lr")) {
params.google_learning_rate = true;
if(params.epoch_to_start_halving<=0) {
std::cout << "ERROR: cannot halve learning rate until 1st epoch \n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
if(vm.count("adaptive-halve-lr")) {
params.learning_rate_schedule = true;
if(vm.count("sequence")) {
if(adaptive_learning_rate.size()!=1) {
std::cout << "ERROR: adaptive-halve-lr takes one arguement\n1.dev file name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.dev_target_file_name = adaptive_learning_rate[0];
params.test_file_name = params.unique_dir + "/validation.txt";
input_file_prep input_helper;
input_helper.integerize_file_LM(params.output_weight_file,params.dev_target_file_name,params.test_file_name,
params.longest_sent,params.minibatch_size,true,params.LSTM_size,params.target_vocab_size,params.num_layers);
}
else {
if(adaptive_learning_rate.size()!=2) {
std::cout << "ERROR: adaptive-halve-lr takes two arguements\n1.source dev file name\n2.target dev file name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.dev_source_file_name = adaptive_learning_rate[0];
params.dev_target_file_name = adaptive_learning_rate[1];
params.test_file_name = params.unique_dir + "/validation.txt";
if(params.dev_source_file_name == params.dev_target_file_name) {
std::cout << "ERROR: do not use the same file for source and target data\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
input_file_prep input_helper;
input_helper.integerize_file_nonLM(params.output_weight_file,params.dev_source_file_name,
params.dev_target_file_name,params.test_file_name,
params.longest_sent,params.minibatch_size,params.LSTM_size,params.source_vocab_size,params.target_vocab_size,params.num_layers);
}
if(vm.count("best-model")) {
params.best_model = true;
}
}
if(vm.count("truncated-softmax")) {
params.shortlist_size = std::stoi(trunc_info[0]);
params.sampled_size = std::stoi(trunc_info[1]);
params.truncated_softmax = true;
if(params.shortlist_size + params.sampled_size > params.target_vocab_size) {
std::cout << "ERROR: you cannot have shortlist size + sampled size >= target vocab size\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
params.train= true;
params.decode=false;
params.test = false;
params.stochastic_generation = false;
return;
}
if(vm.count("kbest")) {
if (kbest_files.size()<4) {
std::cout << "ERROR: at least 4 arguements must be entered for kbest, 1. number of best paths\n"\
" 2 input file name \n"
" 3. neural network file name (this is the output file you get after training the neural network)\n"\
" 4. output file name\n"\
"Additionally more neural network file names can be added to do ensemble decoding\n";
exit (EXIT_FAILURE);
}
boost::filesystem::path unique_path = boost::filesystem::unique_path();
std::cout << "Temp directory being created named: " << unique_path.string() << "\n";
boost::filesystem::create_directories(unique_path);
params.unique_dir = unique_path.string();
//for ensembles
std::vector<std::string> model_names;
for(int i=2; i<kbest_files.size()-1; i++) {
model_names.push_back(kbest_files[i]);
}
params.model_names = model_names;
params.decode_file_name = params.unique_dir+"/decoder_input.txt";
params.decoder_output_file = params.unique_dir+"/decoder_output.txt";
params.num_hypotheses =std::stoi(kbest_files[0]);
params.decode_tmp_file = kbest_files[1];
params.input_weight_file = model_names[0];
params.decoder_final_file = kbest_files.back();
input_file_prep input_helper;
// input_helper.integerize_file_LM(params.input_weight_file,params.decode_tmp_file,"tmp/decoder_input.txt",
// params.longest_sent,1,false,params.LSTM_size,params.target_vocab_size,true,params.source_vocab_size);
input_helper.integerize_file_kbest(params.input_weight_file,params.decode_tmp_file,params.decode_file_name,
params.longest_sent,params.LSTM_size,params.target_vocab_size,params.source_vocab_size,params.num_layers);
if(vm.count("multi-gpu")) {
if(gpu_indicies.size()!=model_names.size()) {
std::cout << "ERROR: for decoding, each model must be specified a gpu\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.gpu_indicies = gpu_indicies;
}
else {
for(int i=0; i<model_names.size(); i++) {
params.gpu_indicies.push_back(0);
}
}
if(params.beam_size<=0) {
std::cout << "ERROR: beam size cannot be <=0\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
if(params.penalty<0) {
std::cout << "ERROR: penalty cannot be less than zero\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
if(vm.count("Dump-LSTM")) {
params.dump_LSTM=true;
}
if(vm.count("dec-ratio")) {
if(decoding_ratio.size()!=2) {
std::cout << "Decoding ratio size: " << decoding_ratio.size() << "\n";
std::cout << decoding_ratio[0] << "\n";
std::cout << "ERROR: only two inputs for decoding ratio\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.min_decoding_ratio = decoding_ratio[0];
params.max_decoding_ratio = decoding_ratio[1];
if(params.min_decoding_ratio >= params.max_decoding_ratio) {
std::cout << "ERROR: min decoding ratio must be <= max_decoding_ratio\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
params.train = false;
params.decode = true;
params.test = false;
params.stochastic_generation = false;
params.LM = false;
return;
}
if(vm.count("force-decode")) {
boost::filesystem::path unique_path = boost::filesystem::unique_path();
std::cout << "Temp directory being created named: " << unique_path.string() << "\n";
boost::filesystem::create_directories(unique_path);
params.unique_dir = unique_path.string();
params.test_file_name = params.unique_dir + "/validation.txt";
if(vm.count("sequence")) {
if(test_files.size()!=3) {
std::cout << "ERROR: force-decode takes three arguements 1.input file name (input sentences)"\
"2. neural network file name 3.output file name \n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.attent_params.attention_model = false;
params.target_file_name = test_files[0];
params.input_weight_file = test_files[1];
params.output_force_decode = test_files[2];
params.LM = true;
input_file_prep input_helper;
input_helper.integerize_file_LM(params.input_weight_file,params.target_file_name,params.test_file_name,
params.longest_sent,1,false,params.LSTM_size,params.target_vocab_size,params.num_layers);
}
else {
if(test_files.size()!=4) {
std::cout << "ERROR: force-decode takes four arguements: 1. source input file"\
" 2. target input file 3. neural network file name 4. output file name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.LM = false;
params.source_file_name = test_files[0];
params.target_file_name = test_files[1];
params.input_weight_file = test_files[2];
params.output_force_decode = test_files[3];
//stuff for attention model alignments
params.attent_params.tmp_alignment_file = params.unique_dir + "/alignments.txt";
if(params.source_file_name == params.target_file_name) {
std::cout << "ERROR: do not use the same file for source and target data\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
input_file_prep input_helper;
input_helper.integerize_file_nonLM(params.input_weight_file,params.source_file_name,
params.target_file_name,params.test_file_name,params.longest_sent,1,params.LSTM_size,
params.source_vocab_size,params.target_vocab_size,params.num_layers);
}
params.train= false;
params.decode=false;
params.test = true;
params.minibatch_size=1;
params.stochastic_generation = false;
return;
}
if(vm.count("stoch-gen")) {
if(!vm.count("sequence")) {
std::cout << "ERROR: you can only do stoch-gen on the sequence model\n";
exit (EXIT_FAILURE);
}
if(stoicgen_files.size()!=2) {
std::cout << "ERROR: stoch-gen takes two inputs"\
" 1. neural network file name 2. output file name\n";
exit (EXIT_FAILURE);
}
boost::filesystem::path unique_path = boost::filesystem::unique_path();
std::cout << "Temp directory being created named: " << unique_path.string() << "\n";
boost::filesystem::create_directories(unique_path);
params.unique_dir = unique_path.string();
params.sg_output_file_temp = params.unique_dir + "/sg.txt";
params.input_weight_file = stoicgen_files[0];
params.sg_output_file = stoicgen_files[1];
std::ifstream weights_file;
std::vector<std::string> info;
std::string str;
std::string word;
weights_file.open(params.input_weight_file.c_str());
weights_file.seekg(0, std::ios::beg);
std::getline(weights_file, str); //info from first sentence
std::istringstream iss(str, std::istringstream::in);
while(iss >> word) {
info.push_back(word);
}
weights_file.close();
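		//the first line of the weight file holds the model dimensions: field [1] is read back as the LSTM size and field [2] as the target vocab size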
params.LSTM_size = std::stoi(info[1]);
params.target_vocab_size = std::stoi(info[2]);
params.LM = true;
params.train= false;
params.decode = false;
params.test = false;
params.minibatch_size = 1;
params.stochastic_generation = true;
return;
}
}
catch(po::error& e) {
std::cerr << "ERROR: " << e.what() << std::endl << std::endl;
//std::cerr << desc << std::endl;
exit (EXIT_FAILURE);
}
}
int main(int argc, char **argv) {
//Timing stuff
std::chrono::time_point<std::chrono::system_clock> start_total,
end_total, begin_minibatch,end_minibatch,begin_decoding,end_decoding;
std::chrono::duration<double> elapsed_seconds;
start_total = std::chrono::system_clock::now();
//Initializing the model
global_params params; //Declare all of the global parameters
//create tmp directory if it does not exist already
// if( !(boost::filesystem::exists("tmp/"))) {
// std::cout << "Creating tmp directory for program\n";
// boost::filesystem::create_directory("tmp/");
// }
//file_helper file_info(params.train_file_name,params.minibatch_size,params.train_num_lines_in_file); //Initialize the file information
	//get the command line arguments
command_line_parse(params,argc,argv);
//randomize the seed
if(params.random_seed) {
BZ_CUDA::gen.seed(static_cast<unsigned int>(std::time(0)));
}
neuralMT_model<precision> model; //This is the model
params.printIntroMessage();
if(!params.decode) {
model.initModel(params.LSTM_size,params.minibatch_size,params.source_vocab_size,params.target_vocab_size,
params.longest_sent,params.debug,params.learning_rate,params.clip_gradient,params.norm_clip,
params.input_weight_file,params.output_weight_file,params.softmax_scaled,params.train_perplexity,params.truncated_softmax,
params.shortlist_size,params.sampled_size,params.LM,params.num_layers,params.gpu_indicies,params.dropout,
params.dropout_rate,params.attent_params,params);
}
if(params.load_model_train) {
std::string temp_swap_weights = model.input_weight_file;
model.input_weight_file = params.load_model_name;
model.load_weights();
model.input_weight_file = temp_swap_weights;
}
std::ofstream HPC_output;
if(params.HPC_output) {
HPC_output.open("HPC_OUTPUT.txt");
}
////////////////////////////////////Train the model//////////////////////////////////////
if(params.train) {
//info for averaging the speed
int curr_batch_num_SPEED = 0;
const int thres_batch_num_SPEED = params.screen_print_rate;//set this to whatever
int total_words_batch_SPEED = 0;
double total_batch_time_SPEED = 0;
//File info for the training file
file_helper file_info(params.train_file_name,params.minibatch_size,params.train_num_lines_in_file,params.longest_sent,
params.source_vocab_size,params.target_vocab_size,params.train_total_words,params.truncated_softmax,
params.shortlist_size,params.sampled_size); //Initialize the file information
//model.initFileInfo(&file_info);
params.half_way_count = params.train_total_words/2;
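		//half_way_count marks a half epoch in training words; both the fixed-halve and adaptive-halve learning rate schedules below trigger off of it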
if(params.google_learning_rate) {
std::cout << "Words at which to start halving the learning rate: " << params.half_way_count << "\n";
if(params.HPC_output) {
HPC_output << "Words at which to start halving the learning rate: " << params.half_way_count << "\n";
HPC_output.flush();
}
}
int current_epoch = 1;
std::cout << "Starting model training\n";
std::cout << "Starting epoch 1\n";
if(params.HPC_output) {
HPC_output << "Starting model training\n";
HPC_output << "Starting epoch 1\n";
HPC_output.flush();
}
//stuff for learning rate schedule
int total_words = 0;
precision temp_learning_rate = params.learning_rate; //This is only for the google learning rate
bool learning_rate_flag =true;//used for google learning rate for halving at every 0.5 epochs
double old_perplexity = 0;
model.train_perplexity = 0; //set the model perplexity to zero
while(current_epoch <= params.num_epochs) {
begin_minibatch = std::chrono::system_clock::now();
bool success = file_info.read_minibatch();
end_minibatch = std::chrono::system_clock::now();
elapsed_seconds = end_minibatch-begin_minibatch;
//std::cout << "File I/O time: " << elapsed_seconds.count()/60.0 << " minutes\n";
total_batch_time_SPEED+= elapsed_seconds.count();
begin_minibatch = std::chrono::system_clock::now();
//hipProfilerStart();
model.initFileInfo(&file_info);
model.compute_gradients(file_info.minibatch_tokens_source_input,file_info.minibatch_tokens_source_output,
file_info.minibatch_tokens_target_input,file_info.minibatch_tokens_target_output,
file_info.h_input_vocab_indicies_source,file_info.h_output_vocab_indicies_source,
file_info.h_input_vocab_indicies_target,file_info.h_output_vocab_indicies_target,
file_info.current_source_length,file_info.current_target_length,
file_info.h_input_vocab_indicies_source_Wgrad,file_info.h_input_vocab_indicies_target_Wgrad,
file_info.len_source_Wgrad,file_info.len_target_Wgrad,file_info.h_sampled_indices,
file_info.len_unique_words_trunc_softmax,file_info.h_batch_info);
//hipProfilerStop();
//return;
// return 0;
end_minibatch = std::chrono::system_clock::now();
elapsed_seconds = end_minibatch-begin_minibatch;
total_batch_time_SPEED+= elapsed_seconds.count();
total_words_batch_SPEED+=file_info.words_in_minibatch;
if(curr_batch_num_SPEED>=thres_batch_num_SPEED) {
std::cout << "Recent batch gradient L2 norm size: " << BZ_CUDA::global_norm << "\n";
std::cout << "Batched Minibatch time: " << total_batch_time_SPEED/60.0 << " minutes\n";
std::cout << "Batched Words in minibatch: " << total_words_batch_SPEED << "\n";
std::cout << "Batched Throughput: " << (total_words_batch_SPEED)/(total_batch_time_SPEED) << " words per second\n";
std::cout << total_words << " out of " << params.train_total_words << " epoch: " << current_epoch << "\n\n";
if(params.HPC_output) {
HPC_output << "Recent batch gradient L2 norm size: " << BZ_CUDA::global_norm << "\n";
HPC_output << "Batched Minibatch time: " << total_batch_time_SPEED/60.0 << " minutes\n";
HPC_output << "Batched Words in minibatch: " << total_words_batch_SPEED << "\n";
HPC_output << "Batched Throughput: " << (total_words_batch_SPEED)/(total_batch_time_SPEED) << " words per second\n";
HPC_output << total_words << " out of " << params.train_total_words << " epoch: " << current_epoch << "\n\n";
HPC_output.flush();
}
total_words_batch_SPEED = 0;
total_batch_time_SPEED = 0;
curr_batch_num_SPEED = 0;
}
curr_batch_num_SPEED++;
total_words += file_info.words_in_minibatch;
//stuff for google learning rate
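			//fixed-halve-lr schedule: once current_epoch reaches epoch_to_start_halving, the rate is halved here after every half_way_count words and again at each epoch boundary below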
if(params.google_learning_rate && current_epoch>=params.epoch_to_start_halving && total_words>=params.half_way_count &&
learning_rate_flag) {
temp_learning_rate = temp_learning_rate/2;
std::cout << "New Learning Rate: " << temp_learning_rate << "\n";
model.update_learning_rate(temp_learning_rate);
learning_rate_flag = false;
if(params.HPC_output) {
HPC_output << "New Learning Rate: " << temp_learning_rate << "\n";
HPC_output.flush();
}
}
//stuff for perplexity based learning schedule
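			//adaptive-halve-lr schedule: every half epoch the dev set perplexity is recomputed; if it does not beat the previous value by more than params.margin (and this is not epoch 1), the learning rate is multiplied by params.decrease_factor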
if(params.learning_rate_schedule && total_words>=params.half_way_count &&learning_rate_flag) {
learning_rate_flag = false;
double new_perplexity = model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent,
params.source_vocab_size,params.target_vocab_size,HPC_output,false,params.test_total_words,params.HPC_output,false,"");
std::cout << "Old dev set Perplexity: " << old_perplexity << "\n";
std::cout << "New dev set Perplexity: " << new_perplexity << "\n";
if(params.HPC_output) {
HPC_output << "Old dev set Perplexity: " << old_perplexity << "\n";
HPC_output << "New dev set Perplexity: " << new_perplexity << "\n";
HPC_output.flush();
}
if ( (new_perplexity + params.margin >= old_perplexity) && current_epoch!=1) {
temp_learning_rate = temp_learning_rate*params.decrease_factor;
model.update_learning_rate(temp_learning_rate);
std::cout << "New learning rate:" << temp_learning_rate <<"\n\n";
if(params.HPC_output) {
HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n";
HPC_output.flush();
}
}
//perplexity is better so output the best model file
if(params.best_model && params.best_model_perp > new_perplexity) {
std::cout << "Now outputting the new best model\n";
model.dump_best_model(params.best_model_file_name,params.output_weight_file);
if(params.HPC_output) {
HPC_output << "Now outputting the new best model\n";
HPC_output.flush();
}
params.best_model_perp = new_perplexity;
}
old_perplexity = new_perplexity;
}
if(!success) {
current_epoch+=1;
//stuff for google learning rate schedule
if(params.google_learning_rate && current_epoch>=params.epoch_to_start_halving) {
temp_learning_rate = temp_learning_rate/2;
std::cout << "New learning rate:" << temp_learning_rate <<"\n\n";
model.update_learning_rate(temp_learning_rate);
learning_rate_flag = true;
if(params.HPC_output) {
HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n";
HPC_output.flush();
}
}
//stuff for stanford learning rate schedule
if(params.stanford_learning_rate && current_epoch>=params.epoch_to_start_halving_full) {
temp_learning_rate = temp_learning_rate/2;
std::cout << "New learning rate:" << temp_learning_rate <<"\n\n";
model.update_learning_rate(temp_learning_rate);
learning_rate_flag = true;
if(params.HPC_output) {
HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n";
HPC_output.flush();
}
}
double new_perplexity;
if(params.learning_rate_schedule) {
new_perplexity = model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent,
params.source_vocab_size,params.target_vocab_size,HPC_output,false,params.test_total_words,params.HPC_output,false,"");
}
//stuff for perplexity based learning schedule
if(params.learning_rate_schedule) {
std::cout << "Old dev set Perplexity: " << old_perplexity << "\n";
std::cout << "New dev set Perplexity: " << new_perplexity << "\n";
if(params.HPC_output) {
HPC_output << "Old dev set Perplexity: " << old_perplexity << "\n";
HPC_output << "New dev set Perplexity: " << new_perplexity << "\n";
HPC_output.flush();
}
if ( (new_perplexity + params.margin >= old_perplexity) && current_epoch!=1) {
temp_learning_rate = temp_learning_rate*params.decrease_factor;
model.update_learning_rate(temp_learning_rate);
std::cout << "New learning rate:" << temp_learning_rate <<"\n\n";
if(params.HPC_output) {
HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n";
HPC_output.flush();
}
}
//perplexity is better so output the best model file
if(params.best_model && params.best_model_perp > new_perplexity) {
std::cout << "Now outputting the new best model\n";
model.dump_best_model(params.best_model_file_name,params.output_weight_file);
if(params.HPC_output) {
HPC_output << "Now outputting the new best model\n";
HPC_output.flush();
}
params.best_model_perp = new_perplexity;
}
learning_rate_flag = true;
old_perplexity = new_perplexity;
}
if(params.train_perplexity) {
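				//model.train_perplexity has accumulated the natural-log likelihood of the training targets; dividing by log(2) converts it to base 2, so the value printed below is perplexity = 2^(-log2(P)/total target words)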
model.train_perplexity = model.train_perplexity/::log(2.0);
std::cout << "PData on train set:" << model.train_perplexity << "\n";
std::cout << "Total target words: " << file_info.total_target_words << "\n";
std::cout << "Training set perplexity: " << ::pow(2,-1*model.train_perplexity/file_info.total_target_words) << "\n";
if(params.HPC_output) {
HPC_output << "Training set perplexity: " << ::pow(2,-1*model.train_perplexity/file_info.total_target_words) << "\n";
HPC_output.flush();
}
model.train_perplexity = 0;
}
total_words=0;
if(current_epoch <= params.num_epochs) {
std::cout << "-----------------------------------" << std::endl;
std::cout << "Starting epoch " << current_epoch << std::endl;
std::cout << "-----------------------------------" << std::endl;
if(params.HPC_output) {
HPC_output << "-----------------------------------" << std::endl;
HPC_output << "Starting epoch " << current_epoch << std::endl;
HPC_output << "-----------------------------------" << std::endl;
HPC_output.flush();
}
}
}
devSynchAll();
}
//Now that training is done, dump the weights
devSynchAll();
model.dump_weights();
}
/////////////////////////////////Get perplexity on test set////////////////////////////////
if(params.test) {
model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent,
params.source_vocab_size,params.target_vocab_size,HPC_output,true,params.test_total_words,params.HPC_output,true,params.output_force_decode);
//now unint alignments
if(model.attent_params.dump_alignments) {
input_file_prep input_helper;
model.output_alignments.close();
input_helper.unint_alignments(params.input_weight_file,params.attent_params.tmp_alignment_file,params.attent_params.alignment_file);
}
}
if(params.LM && params.stochastic_generation) {
model.stoicastic_generation(params.sg_length,params.sg_output_file_temp,params.temperature);
input_file_prep input_helper;
input_helper.unint_file(params.input_weight_file,params.sg_output_file_temp,params.sg_output_file,true,false);
}
///////////////////////////////////////////decode the model////////////////////////////////////////////
if(params.decode) {
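		//kbest/beam-search decoding: ensemble_factory loads the model(s) in params.model_names, decodes the integerized input in params.decode_file_name into params.decoder_output_file, and unint_file below maps the result back to words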
//std::cout << "-----------------Starting Decoding----------------\n";
begin_decoding = std::chrono::system_clock::now();
ensemble_factory<precision> ensemble_decode(params.model_names,params.num_hypotheses,params.beam_size, params.min_decoding_ratio,
params.penalty, params.longest_sent,params.decode_num_lines_in_file,params.print_score,
params.decoder_output_file,params.decode_file_name,
params.gpu_indicies,params.max_decoding_ratio,params.target_vocab_size,
params.dump_LSTM,params.LSTM_dump_file,params);
std::cout << "-----------------Starting Decoding----------------\n";
ensemble_decode.decode_file();
end_decoding = std::chrono::system_clock::now();
std::chrono::duration<double> elapsed_seconds = end_decoding-begin_decoding;
std::cout << "Decoding time: " << elapsed_seconds.count()/60.0 << " minutes\n";
//now unintegerize the file
input_file_prep input_helper;
input_helper.unint_file(params.input_weight_file,params.decoder_output_file,params.decoder_final_file,false,true);
}
//remove the temp directory created
if(params.unique_dir!="NULL") {
boost::filesystem::path temp_path(params.unique_dir);
//boost::filesystem::remove_all(temp_path);
}
//Compute the final runtime
end_total = std::chrono::system_clock::now();
elapsed_seconds = end_total-start_total;
std::cout << "\n\n\n";
std::cout << "Total Program Runtime: " << elapsed_seconds.count()/60.0 << " minutes" << std::endl;
}
|
2a020cb93d06ed482e4a98aa47accd38bbbdf425.cu
|
//STL includes
#include <iostream>
#include <vector>
#include <time.h>
#include <cmath>
#include <chrono>
#include <iomanip>
#include <fstream>
#include "math_constants.h"
//Eigen includes
#include <Eigen/Dense>
#include <Eigen/Sparse>
//Boost
#include "boost/program_options.hpp"
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/path.hpp>
//My own includes
#include "global_params.h"
#include "prev_states.h"
#include "input_file_prep.h"
#include "BZ_CUDA_UTIL.h"
#include "attention_layer.h"
#include "attention_node.h"
#include "decoder_model_wrapper.h"
#include "ensemble_factory.h"
#include "base_layer.h"
#include "NCE.h"
#include "gpu_info_struct.h"
#include "custom_kernels.h"
#include "Hidden_To_Hidden_Layer.h"
#include "LSTM_HH.h"
#include "model.h"
#include "fileHelper.h"
#include "Eigen_Util.h"
#include "model.hpp"
#include "base_layer.hpp"
#include "LSTM.hpp"
#include "softmax.hpp"
#include "Input_To_Hidden_Layer.hpp"
#include "Hidden_To_Hidden_Layer.hpp"
#include "LSTM_HH.hpp"
#include "decoder_model_wrapper.hpp"
#include "ensemble_factory.hpp"
#include "attention_layer.hpp"
#include "attention_node.hpp"
#include "NCE.hpp"
//parse the command line from the user
void command_line_parse(global_params &params,int argc, char **argv) {
//files for keeping the user input
//if not s, 1st source, 2nd target, 3rd output weights name
//if s, 1st target, 2nd output weights name
std::vector<std::string> train_files;
//files for force decoding
//if not s, 1. source input file 2. target input file 3. neural network file name 4. output file name
//if s, 1. target input file 2. neural network file name 3. output file name
std::vector<std::string> test_files;
//stuff for adaptive learning rate schedule
//if not seq , 1st is source dev, 2nd is target dev
//if seq 1st is target dev
std::vector<std::string> adaptive_learning_rate;
//lower and upper range for parameter initialization
std::vector<precision> lower_upper_range;
	//for the kbest flag, 4 arguments must be entered for kbest: 1. number of best paths 2. input file name
	//3. neural network file name (this is the output file you get after training the neural network) 4. output file name
std::vector<std::string> kbest_files;
//for stoic gen, 1st neural network file, 2nd is output file name
std::vector<std::string> stoicgen_files;
//truncated softmax
std::vector<std::string> trunc_info;
//for decoding ratios
std::vector<precision> decoding_ratio;
//for continuing to train
std::vector<std::string> cont_train;
//for multi gpu training
std::vector<int> gpu_indicies;
//basic format setup
namespace po = boost::program_options;
po::options_description desc("Options");
	desc.add_options()
		("help,h", "Run to get help on how to use the program")
		("train,t",po::value<std::vector<std::string> > (&train_files)->multitoken(),"Train a model with input data file(s) and a name for the neural network output file"\
			". \nFORMAT (if sequence to sequence): <source file name> <target file name> <neural network output name> "\
			" \nFORMAT (if sequence): <target file name> <neural network output name>")
		("cont-train,C",po::value<std::vector<std::string>> (&cont_train)->multitoken(),"Resume training of a model (THIS WILL OVERWRITE THE MODEL FILE)\n"\
			"FORMAT: (if sequence to sequence): <source file name> <target file name> <neural network file name>\n"\
			"FORMAT: (if seq): <target file name> <neural network file name>")
		("train-ensemble",po::value<std::string> (&params.ensemble_train_file_name),"Train a model with the same integerization mappings as another model. This is needed for doing ensemble decoding\n"\
			"FORMAT: <neural network file name>")
		("num-layers,N",po::value<int>(&params.num_layers),"Set the number of LSTM layers you want for your model\n DEFAULT:1")
		("multi-gpu,M",po::value<std::vector<int>> (&gpu_indicies)->multitoken(), "Train the model on multiple gpus.\nFORMAT: <gpu for layer 1> <gpu for layer 2> ... <gpu for softmax>\n"\
			"DEFAULT: all layers and softmax lie on gpu 0")
		("force-decode,f",po::value<std::vector<std::string> > (&test_files)->multitoken(), "Get per line probability of dataset plus the perplexity\n"\
			"FORMAT: (if sequence to sequence): <source file name> <target file name> <trained neural network file name> <output file name>\n"\
			"FORMAT: (if sequence): <target file name> <trained neural network file name> <output file name>")
		("stoch-gen,g", po::value<std::vector<std::string> > (&stoicgen_files)->multitoken(),"Do random generation for a sequence model, such as a language model\n"\
			"FORMAT: <neural network file name> <output file name>")
		("stoch-gen-len",po::value<int>(&params.sg_length) ,"How many sentences to let stoch-gen run for\n"\
			"FORMAT: <num sentences>\n"
			"DEFAULT: 100")
		("dump-alignments",po::value<bool>(&params.attent_params.dump_alignments),"Dump the alignments to a file")
		("temperature",po::value<double>(&params.temperature) ,"What should the temperature be for the stoch generation\n"\
			"FORMAT: <temperature> where temperature is typically between [0,1]. A lower temperature makes the model output less and less from what it memorized from training\n"\
			"DEFAULT: 1")
		("sequence,s", "Train model that learns a sequence, such as language modeling. Default model is sequence to sequence model")
		("carve_sequence_data",po::value<int>(&params.backprop_len),"For sequence models, train the data as if the data was one long sequence. DEFAULT: <false>")
		("dropout,d",po::value<precision>(&params.dropout_rate),"Use dropout and set the dropout rate. This value is the probability of keeping a node.\nFORMAT: <dropout rate>\n DEFAULT: not used")
		("learning-rate,l",po::value<precision>(&params.learning_rate),"Set the learning rate\n DEFAULT: 0.7")
		("random-seed",po::value<bool>(&params.random_seed),"Use a random seed instead of a fixed one\n")
		("longest-sent,L",po::value<int>(&params.longest_sent),"Set the maximum sentence length for training.\n DEFAULT: 100")
		("hiddenstate-size,H",po::value<int>(&params.LSTM_size),"Set hiddenstate size \n DEFAULT: 1000")
		("UNK-replacement",po::value<int>(&params.unk_aligned_width),"Set unk replacement to be true and set the width\n FORMAT: <alignment width>")
		("truncated-softmax,T",po::value<std::vector<std::string>> (&trunc_info)->multitoken(),"Use truncated softmax\n DEFAULT: not being used\n"\
			"FORMAT: <shortlist size> <sampled size>")
		("NCE",po::value<int>(&params.num_negative_samples),"Use an NCE loss function, specify the number of noise samples you want (these are shared across the minibatch for speed)")
		("attention-model",po::value<bool>(&params.attent_params.attention_model),"Bool for whether you want to train with the attention model\n")
		("attention-width",po::value<int>(&params.attent_params.D),"How many words do you want to look at around the alignment position on one half, default 10\n")
		("feed_input",po::value<bool>(&params.attent_params.feed_input),"Bool for whether you want feed input for the attention model\n")
		("source-vocab,v",po::value<int>(&params.source_vocab_size),"Set source vocab size\n DEFAULT: number of unique words in source training corpus")
		("target-vocab,V",po::value<int>(&params.target_vocab_size),"Set target vocab size\n DEFAULT: number of unique words in target training corpus")
		("shuffle",po::value<bool>(&params.shuffle),"true if you want to shuffle the train data\n DEFAULT: true")
		("parameter-range,P",po::value<std::vector<precision> > (&lower_upper_range)->multitoken(),"parameter initialization range\n"\
			"FORMAT: <Lower range value> <Upper range value>\n DEFAULT: -0.08 0.08")
		("number-epochs,n",po::value<int>(&params.num_epochs),"Set number of epochs\n DEFAULT: 10")
		("matrix-clip-gradients,c",po::value<precision>(&params.norm_clip),"Set gradient clipping threshold\n DEFAULT: 5")
		("ind-clip-gradients,i",po::value<precision>(&BZ_CUDA::ind_norm_clip_thres),"Set gradient clipping threshold for individual elements\n DEFAULT: 0.1")
		("whole-clip-gradients,w",po::value<precision>(&params.norm_clip),"Set gradient clipping threshold for all gradients\n DEFAULT: 5")
		("adaptive-halve-lr,a",po::value<std::vector<std::string>> (&adaptive_learning_rate)->multitoken(),"change the learning rate"\
			" when the perplexity on your specified dev set decreases from the previous half epoch by some constant, so "\
			" new_learning_rate = constant*old_learning_rate, by default the constant is 0.5, but can be set using adaptive-decrease-factor\n"
			"FORMAT: (if sequence to sequence): <source dev file name> <target dev file name>\n"\
			"FORMAT: (if sequence): <target dev file name>")
		("adaptive-decrease-factor,A",po::value<precision>(&params.decrease_factor),"To be used with adaptive-halve-lr;"\
			" sets the constant that the learning rate is multiplied by\n DEFAULT: 0.5")
		("fixed-halve-lr",po::value<int> (&params.epoch_to_start_halving),"Halve the learning rate"\
			" after a certain epoch, every half epoch afterwards by a specific amount")
		("fixed-halve-lr-full",po::value<int> (&params.epoch_to_start_halving_full),"Halve the learning rate"\
			" after a certain epoch, every epoch afterwards by a specific amount")
		("minibatch-size,m",po::value<int>(&params.minibatch_size),"Set minibatch size\n DEFAULT: 128")
		("screen-print-rate",po::value<int>(&params.screen_print_rate),"Set after how many minibatches you want to print training info to the screen\n DEFAULT: 5")
		("HPC-output",po::value<std::string>(&params.HPC_output_file_name),"Use if you want to have the terminal output also be put to a " \
			"file \n FORMAT: <file name>")
		("best-model,B",po::value<std::string>(&params.best_model_file_name),"During train have the best model be written to a file\nFORMAT: <output file name>")
		("kbest,k",po::value<std::vector<std::string> > (&kbest_files)->multitoken(),"Get k best paths in sequence to sequence model. You can specify more than one model for ensemble decoding\n"\
			"FORMAT: <how many paths> <source file name> <neural network file name 1> <neural network file name 2> ... <output file name>")
		("beam-size,b",po::value<int>(&params.beam_size),"Set beam size for kbest paths\n DEFAULT: 12")
		("penalty,p",po::value<precision>(&params.penalty),"Set penalty for kbest decoding. The value entered"\
			" will be added to the log probability score per target word decoded. This can make the model favor longer sentences for decoding\n DEFAULT: 0")
		("print-score",po::value<bool>(&params.print_score),"Set if you want to print out the unnormalized log prob for each path\n"\
			"FORMAT: <bool> \nthe bool is 1 if you want to print the score or 0 otherwise.\n DEFAULT: false")
		("dec-ratio",po::value<std::vector<precision>>(&decoding_ratio)->multitoken(),"Set the min and max decoding length ratios\n"\
			"This means that a target decoded sentence must be at least min_dec_ratio*len(source sentence)"\
			" and not longer than max_dec_ratio*len(source sentence)\nFORMAT: <min ratio> <max ratio>\n"\
			"DEFAULT: 0.5, 1.5")
		("Dump-LSTM",po::value<std::string>(&params.LSTM_dump_file),"Print the output at each timestep from the LSTM\nFORMAT: <output file name>\n"\
			"The file lines that are output are the following: 1.input word, embedding 2.Forget gate 3.input gate"\
			" 4.c_t 5.output gate 6.h_t 7.probabilities");
po::variables_map vm;
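	//A few illustrative command lines (the executable name "RNN" and the file names are placeholders, not taken from this codebase),
	//using only flag formats documented in the descriptions above:
	//  train a sequence-to-sequence model: ./RNN -t source.txt target.txt model.nn -H 1000 -m 128
	//  train a sequence (language) model:  ./RNN -t target.txt model.nn -s
	//  kbest decoding of a trained model:  ./RNN -k 5 input.txt model.nn kbest_out.txt -b 12
	//  force-decode a dataset:             ./RNN -f source.txt target.txt model.nn scores.txt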
try {
po::store(po::parse_command_line(argc, argv, desc), vm);
po::notify(vm);
//see if the user specified the help flag
if ( vm.count("help") ) {
std::cout << "\n------------------------------\n";
std::cout << "This is Barret Zoph's GPU RNN library\n"
<< "The flags for the command line interface are below\n"
<< "" << "\n";
std::cout << desc << "\n";
exit (EXIT_FAILURE);
}
//error checks to be sure only once of these options is set
if (vm.count("train") && vm.count("kbest")) {
std::cout << "ERROR: you cannot train and get kbest at the same time\n";
exit (EXIT_FAILURE);
}
if (vm.count("train") && vm.count("force-decode")) {
std::cout << "ERROR: you cannot train and force-decode at the same time\n";
exit (EXIT_FAILURE);
}
if (vm.count("force-decode") && vm.count("kbest")) {
std::cout << "ERROR: you cannot force-decode and get kbest at the same time\n";
exit (EXIT_FAILURE);
}
if (!(vm.count("train") || vm.count("force-decode") || vm.count("kbest")||vm.count("stoch-gen") || vm.count("cont-train") )) {
			std::cout << "ERROR: you must either train, continue training, get kbest, stoch generate data, or force-decode\n";
exit (EXIT_FAILURE);
}
params.longest_sent+=4; //because it is really 4 less
if(vm.count("train") || vm.count("cont-train")) {
//some basic error checks to parameters
if(params.learning_rate<=0) {
std::cout << "ERROR: you cannot have a learning rate <=0\n";
exit (EXIT_FAILURE);
}
if(params.minibatch_size<=0) {
std::cout << "ERROR: you cannot have a minibatch of size <=0\n";
exit (EXIT_FAILURE);
}
if(params.LSTM_size<=0) {
std::cout << "ERROR: you cannot have a hiddenstate of size <=0\n";
exit (EXIT_FAILURE);
}
if(params.source_vocab_size<=0) {
if(params.source_vocab_size!=-1) {
std::cout << "ERROR: you cannot have a source_vocab_size <=0\n";
exit (EXIT_FAILURE);
}
}
if(params.target_vocab_size<=0) {
if(params.target_vocab_size!=-1) {
std::cout << "ERROR: you cannot have a target_vocab_size <=0\n";
exit (EXIT_FAILURE);
}
}
if(params.norm_clip<=0) {
std::cout << "ERROR: you cannot have your norm clip <=0\n";
exit (EXIT_FAILURE);
}
if(params.num_epochs<=0) {
std::cout << "ERROR: you cannot have num_epochs <=0\n";
exit (EXIT_FAILURE);
}
if(vm.count("HPC-output")) {
params.HPC_output = true;
}
if(vm.count("dropout")) {
params.dropout = true;
if(params.dropout_rate < 0 || params.dropout_rate > 1) {
std::cout << "ERROR: dropout rate must be between 0 and 1\n";
exit (EXIT_FAILURE);
}
}
if(vm.count("matrix-clip-gradients")) {
BZ_CUDA::global_clip_flag = false;
params.clip_gradient = true;
BZ_CUDA::individual_grad_clip = false;
}
if(vm.count("whole-clip-gradients")) {
BZ_CUDA::global_clip_flag = true;
params.clip_gradient = false;
BZ_CUDA::individual_grad_clip = false;
}
if(vm.count("ind-clip-gradients")) {
BZ_CUDA::global_clip_flag = false;
params.clip_gradient = false;
BZ_CUDA::individual_grad_clip = true;
}
if(vm.count("NCE")) {
params.NCE = true;
params.softmax = false;
BZ_CUDA::print_partition_function = true;
}
if(vm.count("UNK-replacement")) {
params.unk_replace = true;
}
boost::filesystem::path unique_path = boost::filesystem::unique_path();
std::cout << "Temp directory being created named: " << unique_path.string() << "\n";
boost::filesystem::create_directories(unique_path);
params.unique_dir = unique_path.string();
params.train_file_name = params.unique_dir+"/train.txt";
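			//the integerized copy of the training data (train.txt) and, if an adaptive schedule is requested, the dev data (validation.txt) are written into this unique temp directory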
//number of layers
//error checking is done when initializing model
if(vm.count("multi-gpu")) {
params.gpu_indicies = gpu_indicies;
}
if(vm.count("cont-train")) {
//sequence model
if(vm.count("sequence")) {
if(cont_train.size()!=2) {
std::cout << cont_train.size() << "\n";
					std::cout << "ERROR: two arguments to be supplied to the continue train flag\n"\
" 1. train data file name, 2. neural network file name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.attent_params.attention_model = false;
params.target_file_name = cont_train[0];
params.input_weight_file = cont_train[1];
params.output_weight_file = cont_train[1];
params.LM = true;
params.load_model_train = true;
params.load_model_name = params.input_weight_file;
input_file_prep input_helper;
input_helper.integerize_file_LM(params.input_weight_file,params.target_file_name,params.train_file_name,
params.longest_sent,params.minibatch_size,true,params.LSTM_size,params.target_vocab_size,params.num_layers);
}
else {
if(cont_train.size()!=3) {
					std::cout << "ERROR: three arguments to be supplied to the continue train flag\n"\
" 1. source train data file name 2. target train data file name 3. neural network file name \n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.LM = false;
params.source_file_name = cont_train[0];
params.target_file_name = cont_train[1];
params.input_weight_file = cont_train[2];
params.output_weight_file = cont_train[2];
params.load_model_train = true;
params.load_model_name = params.input_weight_file;
if(params.source_file_name == params.target_file_name) {
std::cout << "ERROR: do not use the same file for source and target data\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
input_file_prep input_helper;
input_helper.integerize_file_nonLM(params.input_weight_file,params.source_file_name,
params.target_file_name,params.train_file_name,params.longest_sent,params.minibatch_size,params.LSTM_size,
params.source_vocab_size,params.target_vocab_size,params.num_layers);
}
}
else {
if(vm.count("num-layers")) {
if(params.num_layers <=0) {
std::cout << "ERROR: you must have >= 1 layer for your model\n";
exit (EXIT_FAILURE);
}
}
//now create the necessary files
if(vm.count("sequence")) {
if(train_files.size()!=2) {
					std::cout << "ERROR: two arguments to be supplied to the train flag"\
" 1. train data file name, 2. neural network output name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.attent_params.attention_model = false;
params.LM = true;
params.target_file_name = train_files[0];
params.output_weight_file = train_files[1];
input_file_prep input_helper;
if(vm.count("train-ensemble")) {
params.ensemble_train = true;
}
//this outputs the train.txt file along with the mappings and first line
bool success=true;
if(!params.ensemble_train) {
if(vm.count("carve_sequence_data")) {
params.carve_data = true;
input_helper.prep_files_train_LM_carve(params.minibatch_size,
params.target_file_name,
params.train_file_name,params.target_vocab_size,
params.output_weight_file,params.LSTM_size,params.num_layers,
params.backprop_len);
}
else {
success = input_helper.prep_files_train_LM(params.minibatch_size,params.longest_sent,
params.target_file_name,
params.train_file_name,params.target_vocab_size,
params.shuffle,params.output_weight_file,params.LSTM_size,params.num_layers);
}
}
else {
success = input_helper.prep_files_train_LM_ensemble(params.minibatch_size,params.longest_sent,
params.target_file_name,
params.train_file_name,params.target_vocab_size,
params.shuffle,params.output_weight_file,params.LSTM_size,params.num_layers,params.ensemble_train_file_name);
}
//clean up if error
if(!success) {
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
else {
//then sequence to sequence model
if(train_files.size()!=3) {
std::cout << train_files.size() <<"\n";
					std::cout << "ERROR: three arguments to be supplied to the train flag for the sequence to sequence model\n"\
						" 1. source train data file name\n 2. target train data file name\n 3. neural network output name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.LM = false;
params.source_file_name = train_files[0];
params.target_file_name = train_files[1];
params.output_weight_file = train_files[2];
if(params.source_file_name == params.target_file_name) {
std::cout << "ERROR: do not use the same file for source and target data\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
//see if ensemble training
if(vm.count("train-ensemble")) {
params.ensemble_train = true;
}
input_file_prep input_helper;
bool success=true;
if(!params.ensemble_train) {
success = input_helper.prep_files_train_nonLM(params.minibatch_size,params.longest_sent,
params.source_file_name,params.target_file_name,
params.train_file_name,params.source_vocab_size,params.target_vocab_size,
params.shuffle,params.output_weight_file,params.LSTM_size,params.num_layers,params.unk_replace,params.unk_aligned_width);
}
else {
success = input_helper.prep_files_train_nonLM_ensemble(params.minibatch_size,params.longest_sent,
params.source_file_name,params.target_file_name,
params.train_file_name,params.source_vocab_size,params.target_vocab_size,
params.shuffle,params.output_weight_file,params.LSTM_size,params.num_layers,params.ensemble_train_file_name);
}
//clean up if error
if(!success) {
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
}
if(vm.count("parameter-range")) {
if(lower_upper_range.size()!=2) {
std::cout << "ERROR: you must have two inputs to parameter-range\n1.lower bound\n2. upper bound\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
BZ_CUDA::lower = lower_upper_range[0];
BZ_CUDA::upper = lower_upper_range[1];
if(BZ_CUDA::lower >= BZ_CUDA::upper) {
std::cout << "ERROR: the lower parameter range cannot be greater than the upper range\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
if(vm.count("fixed-halve-lr-full")) {
params.stanford_learning_rate = true;
}
if(vm.count("fixed-halve-lr")) {
params.google_learning_rate = true;
if(params.epoch_to_start_halving<=0) {
std::cout << "ERROR: cannot halve learning rate until 1st epoch \n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
if(vm.count("adaptive-halve-lr")) {
params.learning_rate_schedule = true;
if(vm.count("sequence")) {
if(adaptive_learning_rate.size()!=1) {
					std::cout << "ERROR: adaptive-halve-lr takes one argument\n1. dev file name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.dev_target_file_name = adaptive_learning_rate[0];
params.test_file_name = params.unique_dir + "/validation.txt";
input_file_prep input_helper;
input_helper.integerize_file_LM(params.output_weight_file,params.dev_target_file_name,params.test_file_name,
params.longest_sent,params.minibatch_size,true,params.LSTM_size,params.target_vocab_size,params.num_layers);
}
else {
if(adaptive_learning_rate.size()!=2) {
					std::cout << "ERROR: adaptive-halve-lr takes two arguments\n1. source dev file name\n2. target dev file name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.dev_source_file_name = adaptive_learning_rate[0];
params.dev_target_file_name = adaptive_learning_rate[1];
params.test_file_name = params.unique_dir + "/validation.txt";
if(params.dev_source_file_name == params.dev_target_file_name) {
std::cout << "ERROR: do not use the same file for source and target data\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
input_file_prep input_helper;
input_helper.integerize_file_nonLM(params.output_weight_file,params.dev_source_file_name,
params.dev_target_file_name,params.test_file_name,
params.longest_sent,params.minibatch_size,params.LSTM_size,params.source_vocab_size,params.target_vocab_size,params.num_layers);
}
if(vm.count("best-model")) {
params.best_model = true;
}
}
if(vm.count("truncated-softmax")) {
params.shortlist_size = std::stoi(trunc_info[0]);
params.sampled_size = std::stoi(trunc_info[1]);
params.truncated_softmax = true;
if(params.shortlist_size + params.sampled_size > params.target_vocab_size) {
std::cout << "ERROR: you cannot have shortlist size + sampled size >= target vocab size\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
params.train= true;
params.decode=false;
params.test = false;
params.stochastic_generation = false;
return;
}
if(vm.count("kbest")) {
if (kbest_files.size()<4) {
				std::cout << "ERROR: at least 4 arguments must be entered for kbest: 1. number of best paths\n"\
					" 2. input file name\n"
" 3. neural network file name (this is the output file you get after training the neural network)\n"\
" 4. output file name\n"\
"Additionally more neural network file names can be added to do ensemble decoding\n";
exit (EXIT_FAILURE);
}
boost::filesystem::path unique_path = boost::filesystem::unique_path();
std::cout << "Temp directory being created named: " << unique_path.string() << "\n";
boost::filesystem::create_directories(unique_path);
params.unique_dir = unique_path.string();
//for ensembles
std::vector<std::string> model_names;
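			//kbest_files layout: [0] = number of best paths, [1] = source input file, [2 .. size()-2] = neural network file(s) (more than one means ensemble decoding), back() = final output file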
for(int i=2; i<kbest_files.size()-1; i++) {
model_names.push_back(kbest_files[i]);
}
params.model_names = model_names;
params.decode_file_name = params.unique_dir+"/decoder_input.txt";
params.decoder_output_file = params.unique_dir+"/decoder_output.txt";
params.num_hypotheses =std::stoi(kbest_files[0]);
params.decode_tmp_file = kbest_files[1];
params.input_weight_file = model_names[0];
params.decoder_final_file = kbest_files.back();
input_file_prep input_helper;
// input_helper.integerize_file_LM(params.input_weight_file,params.decode_tmp_file,"tmp/decoder_input.txt",
// params.longest_sent,1,false,params.LSTM_size,params.target_vocab_size,true,params.source_vocab_size);
input_helper.integerize_file_kbest(params.input_weight_file,params.decode_tmp_file,params.decode_file_name,
params.longest_sent,params.LSTM_size,params.target_vocab_size,params.source_vocab_size,params.num_layers);
if(vm.count("multi-gpu")) {
if(gpu_indicies.size()!=model_names.size()) {
					std::cout << "ERROR: for decoding, each model must be assigned a gpu\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.gpu_indicies = gpu_indicies;
}
else {
for(int i=0; i<model_names.size(); i++) {
params.gpu_indicies.push_back(0);
}
}
if(params.beam_size<=0) {
std::cout << "ERROR: beam size cannot be <=0\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
if(params.penalty<0) {
std::cout << "ERROR: penalty cannot be less than zero\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
if(vm.count("Dump-LSTM")) {
params.dump_LSTM=true;
}
if(vm.count("dec-ratio")) {
if(decoding_ratio.size()!=2) {
std::cout << "Decoding ratio size: " << decoding_ratio.size() << "\n";
std::cout << decoding_ratio[0] << "\n";
std::cout << "ERROR: only two inputs for decoding ratio\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.min_decoding_ratio = decoding_ratio[0];
params.max_decoding_ratio = decoding_ratio[1];
if(params.min_decoding_ratio >= params.max_decoding_ratio) {
					std::cout << "ERROR: min decoding ratio must be less than max_decoding_ratio\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
params.train = false;
params.decode = true;
params.test = false;
params.stochastic_generation = false;
params.LM = false;
return;
}
if(vm.count("force-decode")) {
boost::filesystem::path unique_path = boost::filesystem::unique_path();
std::cout << "Temp directory being created named: " << unique_path.string() << "\n";
boost::filesystem::create_directories(unique_path);
params.unique_dir = unique_path.string();
params.test_file_name = params.unique_dir + "/validation.txt";
if(vm.count("sequence")) {
if(test_files.size()!=3) {
					std::cout << "ERROR: force-decode takes three arguments: 1. input file name (input sentences)"\
						" 2. neural network file name 3. output file name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.attent_params.attention_model = false;
params.target_file_name = test_files[0];
params.input_weight_file = test_files[1];
params.output_force_decode = test_files[2];
params.LM = true;
input_file_prep input_helper;
input_helper.integerize_file_LM(params.input_weight_file,params.target_file_name,params.test_file_name,
params.longest_sent,1,false,params.LSTM_size,params.target_vocab_size,params.num_layers);
}
else {
if(test_files.size()!=4) {
					std::cout << "ERROR: force-decode takes four arguments: 1. source input file"\
" 2. target input file 3. neural network file name 4. output file name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.LM = false;
params.source_file_name = test_files[0];
params.target_file_name = test_files[1];
params.input_weight_file = test_files[2];
params.output_force_decode = test_files[3];
//stuff for attention model alignments
params.attent_params.tmp_alignment_file = params.unique_dir + "/alignments.txt";
if(params.source_file_name == params.target_file_name) {
std::cout << "ERROR: do not use the same file for source and target data\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
input_file_prep input_helper;
input_helper.integerize_file_nonLM(params.input_weight_file,params.source_file_name,
params.target_file_name,params.test_file_name,params.longest_sent,1,params.LSTM_size,
params.source_vocab_size,params.target_vocab_size,params.num_layers);
}
params.train= false;
params.decode=false;
params.test = true;
params.minibatch_size=1;
params.stochastic_generation = false;
return;
}
if(vm.count("stoch-gen")) {
if(!vm.count("sequence")) {
std::cout << "ERROR: you can only do stoch-gen on the sequence model\n";
exit (EXIT_FAILURE);
}
if(stoicgen_files.size()!=2) {
std::cout << "ERROR: stoch-gen takes two inputs"\
" 1. neural network file name 2. output file name\n";
exit (EXIT_FAILURE);
}
boost::filesystem::path unique_path = boost::filesystem::unique_path();
std::cout << "Temp directory being created named: " << unique_path.string() << "\n";
boost::filesystem::create_directories(unique_path);
params.unique_dir = unique_path.string();
params.sg_output_file_temp = params.unique_dir + "/sg.txt";
params.input_weight_file = stoicgen_files[0];
params.sg_output_file = stoicgen_files[1];
std::ifstream weights_file;
std::vector<std::string> info;
std::string str;
std::string word;
weights_file.open(params.input_weight_file.c_str());
weights_file.seekg(0, std::ios::beg);
std::getline(weights_file, str); //info from first sentence
std::istringstream iss(str, std::istringstream::in);
while(iss >> word) {
info.push_back(word);
}
weights_file.close();
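			//the first line of the weight file holds the model dimensions: field [1] is read back as the LSTM size and field [2] as the target vocab size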
params.LSTM_size = std::stoi(info[1]);
params.target_vocab_size = std::stoi(info[2]);
params.LM = true;
params.train= false;
params.decode = false;
params.test = false;
params.minibatch_size = 1;
params.stochastic_generation = true;
return;
}
}
catch(po::error& e) {
std::cerr << "ERROR: " << e.what() << std::endl << std::endl;
//std::cerr << desc << std::endl;
exit (EXIT_FAILURE);
}
}
int main(int argc, char **argv) {
//Timing stuff
std::chrono::time_point<std::chrono::system_clock> start_total,
end_total, begin_minibatch,end_minibatch,begin_decoding,end_decoding;
std::chrono::duration<double> elapsed_seconds;
start_total = std::chrono::system_clock::now();
//Initializing the model
global_params params; //Declare all of the global parameters
//create tmp directory if it does not exist already
// if( !(boost::filesystem::exists("tmp/"))) {
// std::cout << "Creating tmp directory for program\n";
// boost::filesystem::create_directory("tmp/");
// }
//file_helper file_info(params.train_file_name,params.minibatch_size,params.train_num_lines_in_file); //Initialize the file information
	//get the command line arguments
command_line_parse(params,argc,argv);
//randomize the seed
if(params.random_seed) {
BZ_CUDA::gen.seed(static_cast<unsigned int>(std::time(0)));
}
neuralMT_model<precision> model; //This is the model
params.printIntroMessage();
if(!params.decode) {
model.initModel(params.LSTM_size,params.minibatch_size,params.source_vocab_size,params.target_vocab_size,
params.longest_sent,params.debug,params.learning_rate,params.clip_gradient,params.norm_clip,
params.input_weight_file,params.output_weight_file,params.softmax_scaled,params.train_perplexity,params.truncated_softmax,
params.shortlist_size,params.sampled_size,params.LM,params.num_layers,params.gpu_indicies,params.dropout,
params.dropout_rate,params.attent_params,params);
}
if(params.load_model_train) {
std::string temp_swap_weights = model.input_weight_file;
model.input_weight_file = params.load_model_name;
model.load_weights();
model.input_weight_file = temp_swap_weights;
}
std::ofstream HPC_output;
if(params.HPC_output) {
HPC_output.open("HPC_OUTPUT.txt");
}
////////////////////////////////////Train the model//////////////////////////////////////
if(params.train) {
//info for averaging the speed
int curr_batch_num_SPEED = 0;
const int thres_batch_num_SPEED = params.screen_print_rate;//set this to whatever
int total_words_batch_SPEED = 0;
double total_batch_time_SPEED = 0;
//File info for the training file
file_helper file_info(params.train_file_name,params.minibatch_size,params.train_num_lines_in_file,params.longest_sent,
params.source_vocab_size,params.target_vocab_size,params.train_total_words,params.truncated_softmax,
params.shortlist_size,params.sampled_size); //Initialize the file information
//model.initFileInfo(&file_info);
params.half_way_count = params.train_total_words/2;
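		//half_way_count marks a half epoch in training words; both the fixed-halve and adaptive-halve learning rate schedules below trigger off of it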
if(params.google_learning_rate) {
std::cout << "Words at which to start halving the learning rate: " << params.half_way_count << "\n";
if(params.HPC_output) {
HPC_output << "Words at which to start halving the learning rate: " << params.half_way_count << "\n";
HPC_output.flush();
}
}
int current_epoch = 1;
std::cout << "Starting model training\n";
std::cout << "Starting epoch 1\n";
if(params.HPC_output) {
HPC_output << "Starting model training\n";
HPC_output << "Starting epoch 1\n";
HPC_output.flush();
}
//stuff for learning rate schedule
int total_words = 0;
precision temp_learning_rate = params.learning_rate; //This is only for the google learning rate
bool learning_rate_flag =true;//used for google learning rate for halving at every 0.5 epochs
double old_perplexity = 0;
model.train_perplexity = 0; //set the model perplexity to zero
while(current_epoch <= params.num_epochs) {
begin_minibatch = std::chrono::system_clock::now();
bool success = file_info.read_minibatch();
end_minibatch = std::chrono::system_clock::now();
elapsed_seconds = end_minibatch-begin_minibatch;
//std::cout << "File I/O time: " << elapsed_seconds.count()/60.0 << " minutes\n";
total_batch_time_SPEED+= elapsed_seconds.count();
begin_minibatch = std::chrono::system_clock::now();
//cudaProfilerStart();
model.initFileInfo(&file_info);
model.compute_gradients(file_info.minibatch_tokens_source_input,file_info.minibatch_tokens_source_output,
file_info.minibatch_tokens_target_input,file_info.minibatch_tokens_target_output,
file_info.h_input_vocab_indicies_source,file_info.h_output_vocab_indicies_source,
file_info.h_input_vocab_indicies_target,file_info.h_output_vocab_indicies_target,
file_info.current_source_length,file_info.current_target_length,
file_info.h_input_vocab_indicies_source_Wgrad,file_info.h_input_vocab_indicies_target_Wgrad,
file_info.len_source_Wgrad,file_info.len_target_Wgrad,file_info.h_sampled_indices,
file_info.len_unique_words_trunc_softmax,file_info.h_batch_info);
//cudaProfilerStop();
//return;
// return 0;
end_minibatch = std::chrono::system_clock::now();
elapsed_seconds = end_minibatch-begin_minibatch;
total_batch_time_SPEED+= elapsed_seconds.count();
total_words_batch_SPEED+=file_info.words_in_minibatch;
if(curr_batch_num_SPEED>=thres_batch_num_SPEED) {
std::cout << "Recent batch gradient L2 norm size: " << BZ_CUDA::global_norm << "\n";
std::cout << "Batched Minibatch time: " << total_batch_time_SPEED/60.0 << " minutes\n";
std::cout << "Batched Words in minibatch: " << total_words_batch_SPEED << "\n";
std::cout << "Batched Throughput: " << (total_words_batch_SPEED)/(total_batch_time_SPEED) << " words per second\n";
std::cout << total_words << " out of " << params.train_total_words << " epoch: " << current_epoch << "\n\n";
if(params.HPC_output) {
HPC_output << "Recent batch gradient L2 norm size: " << BZ_CUDA::global_norm << "\n";
HPC_output << "Batched Minibatch time: " << total_batch_time_SPEED/60.0 << " minutes\n";
HPC_output << "Batched Words in minibatch: " << total_words_batch_SPEED << "\n";
HPC_output << "Batched Throughput: " << (total_words_batch_SPEED)/(total_batch_time_SPEED) << " words per second\n";
HPC_output << total_words << " out of " << params.train_total_words << " epoch: " << current_epoch << "\n\n";
HPC_output.flush();
}
total_words_batch_SPEED = 0;
total_batch_time_SPEED = 0;
curr_batch_num_SPEED = 0;
}
curr_batch_num_SPEED++;
total_words += file_info.words_in_minibatch;
//stuff for google learning rate
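			//fixed-halve-lr schedule: once current_epoch reaches epoch_to_start_halving, the rate is halved here after every half_way_count words and again at each epoch boundary below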
if(params.google_learning_rate && current_epoch>=params.epoch_to_start_halving && total_words>=params.half_way_count &&
learning_rate_flag) {
temp_learning_rate = temp_learning_rate/2;
std::cout << "New Learning Rate: " << temp_learning_rate << "\n";
model.update_learning_rate(temp_learning_rate);
learning_rate_flag = false;
if(params.HPC_output) {
HPC_output << "New Learning Rate: " << temp_learning_rate << "\n";
HPC_output.flush();
}
}
//stuff for perplexity based learning schedule
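			//adaptive-halve-lr schedule: every half epoch the dev set perplexity is recomputed; if it does not beat the previous value by more than params.margin (and this is not epoch 1), the learning rate is multiplied by params.decrease_factor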
if(params.learning_rate_schedule && total_words>=params.half_way_count &&learning_rate_flag) {
learning_rate_flag = false;
double new_perplexity = model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent,
params.source_vocab_size,params.target_vocab_size,HPC_output,false,params.test_total_words,params.HPC_output,false,"");
std::cout << "Old dev set Perplexity: " << old_perplexity << "\n";
std::cout << "New dev set Perplexity: " << new_perplexity << "\n";
if(params.HPC_output) {
HPC_output << "Old dev set Perplexity: " << old_perplexity << "\n";
HPC_output << "New dev set Perplexity: " << new_perplexity << "\n";
HPC_output.flush();
}
if ( (new_perplexity + params.margin >= old_perplexity) && current_epoch!=1) {
temp_learning_rate = temp_learning_rate*params.decrease_factor;
model.update_learning_rate(temp_learning_rate);
std::cout << "New learning rate:" << temp_learning_rate <<"\n\n";
if(params.HPC_output) {
HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n";
HPC_output.flush();
}
}
//perplexity is better so output the best model file
if(params.best_model && params.best_model_perp > new_perplexity) {
std::cout << "Now outputting the new best model\n";
model.dump_best_model(params.best_model_file_name,params.output_weight_file);
if(params.HPC_output) {
HPC_output << "Now outputting the new best model\n";
HPC_output.flush();
}
params.best_model_perp = new_perplexity;
}
old_perplexity = new_perplexity;
}
if(!success) {
current_epoch+=1;
//stuff for google learning rate schedule
if(params.google_learning_rate && current_epoch>=params.epoch_to_start_halving) {
temp_learning_rate = temp_learning_rate/2;
std::cout << "New learning rate:" << temp_learning_rate <<"\n\n";
model.update_learning_rate(temp_learning_rate);
learning_rate_flag = true;
if(params.HPC_output) {
HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n";
HPC_output.flush();
}
}
//stuff for stanford learning rate schedule
if(params.stanford_learning_rate && current_epoch>=params.epoch_to_start_halving_full) {
temp_learning_rate = temp_learning_rate/2;
std::cout << "New learning rate:" << temp_learning_rate <<"\n\n";
model.update_learning_rate(temp_learning_rate);
learning_rate_flag = true;
if(params.HPC_output) {
HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n";
HPC_output.flush();
}
}
double new_perplexity;
if(params.learning_rate_schedule) {
new_perplexity = model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent,
params.source_vocab_size,params.target_vocab_size,HPC_output,false,params.test_total_words,params.HPC_output,false,"");
}
//stuff for perplexity based learning schedule
if(params.learning_rate_schedule) {
std::cout << "Old dev set Perplexity: " << old_perplexity << "\n";
std::cout << "New dev set Perplexity: " << new_perplexity << "\n";
if(params.HPC_output) {
HPC_output << "Old dev set Perplexity: " << old_perplexity << "\n";
HPC_output << "New dev set Perplexity: " << new_perplexity << "\n";
HPC_output.flush();
}
if ( (new_perplexity + params.margin >= old_perplexity) && current_epoch!=1) {
temp_learning_rate = temp_learning_rate*params.decrease_factor;
model.update_learning_rate(temp_learning_rate);
std::cout << "New learning rate:" << temp_learning_rate <<"\n\n";
if(params.HPC_output) {
HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n";
HPC_output.flush();
}
}
//perplexity is better so output the best model file
if(params.best_model && params.best_model_perp > new_perplexity) {
std::cout << "Now outputting the new best model\n";
model.dump_best_model(params.best_model_file_name,params.output_weight_file);
if(params.HPC_output) {
HPC_output << "Now outputting the new best model\n";
HPC_output.flush();
}
params.best_model_perp = new_perplexity;
}
learning_rate_flag = true;
old_perplexity = new_perplexity;
}
if(params.train_perplexity) {
model.train_perplexity = model.train_perplexity/std::log(2.0);
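//(train_perplexity presumably accumulates the natural-log likelihood; dividing by ln(2) converts it to base 2, so the line below reports perplexity as 2^(-log2 likelihood / total target words))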
std::cout << "PData on train set:" << model.train_perplexity << "\n";
std::cout << "Total target words: " << file_info.total_target_words << "\n";
std::cout << "Training set perplexity: " << std::pow(2,-1*model.train_perplexity/file_info.total_target_words) << "\n";
if(params.HPC_output) {
HPC_output << "Training set perplexity: " << std::pow(2,-1*model.train_perplexity/file_info.total_target_words) << "\n";
HPC_output.flush();
}
model.train_perplexity = 0;
}
total_words=0;
if(current_epoch <= params.num_epochs) {
std::cout << "-----------------------------------" << std::endl;
std::cout << "Starting epoch " << current_epoch << std::endl;
std::cout << "-----------------------------------" << std::endl;
if(params.HPC_output) {
HPC_output << "-----------------------------------" << std::endl;
HPC_output << "Starting epoch " << current_epoch << std::endl;
HPC_output << "-----------------------------------" << std::endl;
HPC_output.flush();
}
}
}
devSynchAll();
}
//Now that training is done, dump the weights
devSynchAll();
model.dump_weights();
}
/////////////////////////////////Get perplexity on test set////////////////////////////////
if(params.test) {
model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent,
params.source_vocab_size,params.target_vocab_size,HPC_output,true,params.test_total_words,params.HPC_output,true,params.output_force_decode);
//now un-integerize the alignments
if(model.attent_params.dump_alignments) {
input_file_prep input_helper;
model.output_alignments.close();
input_helper.unint_alignments(params.input_weight_file,params.attent_params.tmp_alignment_file,params.attent_params.alignment_file);
}
}
if(params.LM && params.stochastic_generation) {
model.stoicastic_generation(params.sg_length,params.sg_output_file_temp,params.temperature);
input_file_prep input_helper;
input_helper.unint_file(params.input_weight_file,params.sg_output_file_temp,params.sg_output_file,true,false);
}
///////////////////////////////////////////decode the model////////////////////////////////////////////
if(params.decode) {
//std::cout << "-----------------Starting Decoding----------------\n";
begin_decoding = std::chrono::system_clock::now();
ensemble_factory<precision> ensemble_decode(params.model_names,params.num_hypotheses,params.beam_size, params.min_decoding_ratio,
params.penalty, params.longest_sent,params.decode_num_lines_in_file,params.print_score,
params.decoder_output_file,params.decode_file_name,
params.gpu_indicies,params.max_decoding_ratio,params.target_vocab_size,
params.dump_LSTM,params.LSTM_dump_file,params);
std::cout << "-----------------Starting Decoding----------------\n";
ensemble_decode.decode_file();
end_decoding = std::chrono::system_clock::now();
std::chrono::duration<double> elapsed_seconds = end_decoding-begin_decoding;
std::cout << "Decoding time: " << elapsed_seconds.count()/60.0 << " minutes\n";
//now unintegerize the file
input_file_prep input_helper;
input_helper.unint_file(params.input_weight_file,params.decoder_output_file,params.decoder_final_file,false,true);
}
//remove the temp directory created (the remove_all call below is currently commented out)
if(params.unique_dir!="NULL") {
boost::filesystem::path temp_path(params.unique_dir);
//boost::filesystem::remove_all(temp_path);
}
//Compute the final runtime
end_total = std::chrono::system_clock::now();
elapsed_seconds = end_total-start_total;
std::cout << "\n\n\n";
std::cout << "Total Program Runtime: " << elapsed_seconds.count()/60.0 << " minutes" << std::endl;
}
|
6ea4a94653f25b3c3d124235bab056f09edcd8ab.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <cstdlib>
#include <cstdio>
#include <algorithm>
#include <memory>
#include "config/config.h"
#include "pt/tracing.h"
#include "scene.h"
#include "utils/rnd.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
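// Illustrative usage of the macro above (not part of the original file):
//   gpuErrchk( hipMalloc((void**)&buf, bytes) );
//   gpuErrchk( hipMemcpy(buf, host, bytes, hipMemcpyHostToDevice) );
// so any failing runtime call reports its file and line before aborting.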
__global__ void debug(Group *group) {
}
__global__ void init(Camera *cam, Vec *result, hiprandState_t *states) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= cam->n_sub) return;
int pixel_idx = idx / cam->subpixel2;
int y = pixel_idx / cam->w;
hiprand_init(y*y*y, idx, 0, &states[idx]);
result[idx] = Vec();
}
__global__ void kernelRayTrace(Group *group, Camera *cam, Vec *result, hiprandState_t *states) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= cam->n_sub) return;
int pixel_idx = idx / cam->subpixel2;
int y = pixel_idx / cam->w;
int x = pixel_idx % cam->w;
int sy = (idx % cam->subpixel2) / cam->subpixel; // subpixel sampling
int sx = (idx % cam->subpixel2) % cam->subpixel;
hiprandState_t* st = &states[idx];
F cx = x + (sx+.5) / cam->subpixel, cy = y + (sy+.5) / cam->subpixel;
F dx = tent_filter(1/cam->subpixel, st), dy = tent_filter(1/cam->subpixel, st);
// camera model
Vec d = cam->x * ( (cx + dx) / cam->w - 0.5 ) + cam->y * ( (cy + dy) / cam->h - 0.5 ) + cam->_z;
Vec p = cam->o + d*cam->focus;
// Vec o = cam->o; // turn off dof
Vec o = cam->o + (rnd(10.0, st)-5) * cam->x + (rnd(10.0, st)-5) * cam->y; // turn on dof
result[idx] = result[idx] + tracing(group, Ray(o, (p-o).normal()), st);
}
__global__ void kernelCombResult(Vec *subpixel, Vec *pixel, Camera *cam, int samp) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= cam->n_pixel) return;
Vec res = Vec();
F div = 1. / samp;
for (int i = 0; i < cam->subpixel2; i++) {
Vec sub = subpixel[idx * cam->subpixel2 + i] * div;
res = res + Vec(clamp(sub.x), clamp(sub.y), clamp(sub.z)) / cam->subpixel2;
}
pixel[idx] = res;
}
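// Note: kernelRayTrace adds one radiance sample per pass into `subpixel`, so dividing by
// `samp` above yields the running Monte Carlo mean; each channel is clamped before the
// subpixel values are averaged into the final pixel.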
int ceil_div(int x, int y) {
return (x + y - 1) / y;
}
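// ceil_div rounds up so every work item gets a thread, e.g. ceil_div(1000, 256) == 4;
// the kernels guard against the overshoot with their `if (idx >= ...) return;` checks.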
int main(int argc, char *argv[]) {
printf("initial begin\n");
Scene scene;
printf("load scene finished\n");
Camera *cam;
hipMalloc((void**)&cam, sizeof(Camera));
hipMemcpy(cam, scene.cam, sizeof(Camera), hipMemcpyHostToDevice); // cpu -> gpu
Group *group = scene.group->to(); // cpu -> gpu
printf("initial end\n");
// { // debug block
// debug<<<dim3(1), dim3(1)>>>(group);
// gpuErrchk( hipDeviceSynchronize() );
// printf("debug test pass\n");
// }
hiprandState_t *states;
Vec *sub_result;
Vec *pixel_result;
hipMalloc((void**)&states, scene.cam->n_sub*sizeof(hiprandState_t));
hipMalloc((void**)&sub_result, scene.cam->n_sub*sizeof(Vec));
hipMalloc((void**)&pixel_result, scene.cam->n_pixel*sizeof(Vec));
dim3 blockDim(blocksize);
dim3 gridDim1(ceil_div(scene.cam->n_sub, blocksize));
dim3 gridDim2(ceil_div(scene.cam->n_pixel, blocksize));
hipLaunchKernelGGL(( init), dim3(gridDim1), dim3(blockDim), 0, 0, cam, sub_result, states);
for (int samp = 1; samp <= scene.cam->samps; ++samp) {
fprintf(stderr, "\rrendering %6d of %d", samp, scene.cam->samps);
hipLaunchKernelGGL(( kernelRayTrace), dim3(gridDim1), dim3(blockDim), 0, 0, group, cam, sub_result, states);
gpuErrchk( hipDeviceSynchronize() ); // wait all
hipLaunchKernelGGL(( kernelCombResult), dim3(gridDim2), dim3(blockDim), 0, 0, sub_result, pixel_result, cam, samp);
gpuErrchk( hipDeviceSynchronize() ); // wait all
if (samp % 100 == 0 || samp == scene.cam->samps-1) {
Vec *img = new RGB[scene.cam->n_pixel];
hipMemcpy(img, pixel_result, scene.cam->n_pixel*sizeof(Vec), hipMemcpyDeviceToHost); // gpu to cpu
FILE *f = fopen("image.ppm", "w");
fprintf(f, "P3\n%d %d\n%d\n", scene.cam->w, scene.cam->h, 255);
for (int i = 0; i < scene.cam->n_pixel; i++) {
fprintf(f, "%d %d %d ", toInt(img[i].x), toInt(img[i].y), toInt(img[i].z));
}
fclose(f);
delete[] img;
}
}
hipFree(states);
hipFree(sub_result);
hipFree(pixel_result);
hipFree(group);
hipFree(cam);
return 0;
}
|
6ea4a94653f25b3c3d124235bab056f09edcd8ab.cu
|
#include <cmath>
#include <cstdlib>
#include <cstdio>
#include <algorithm>
#include <memory>
#include "config/config.h"
#include "pt/tracing.h"
#include "scene.h"
#include "utils/rnd.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void debug(Group *group) {
}
__global__ void init(Camera *cam, Vec *result, curandState *states) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= cam->n_sub) return;
int pixel_idx = idx / cam->subpixel2;
int y = pixel_idx / cam->w;
curand_init(y*y*y, idx, 0, &states[idx]);
result[idx] = Vec();
}
__global__ void kernelRayTrace(Group *group, Camera *cam, Vec *result, curandState *states) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= cam->n_sub) return;
int pixel_idx = idx / cam->subpixel2;
int y = pixel_idx / cam->w;
int x = pixel_idx % cam->w;
int sy = (idx % cam->subpixel2) / cam->subpixel; // subpixel sampling
int sx = (idx % cam->subpixel2) % cam->subpixel;
curandState* st = &states[idx];
F cx = x + (sx+.5) / cam->subpixel, cy = y + (sy+.5) / cam->subpixel;
F dx = tent_filter(1/cam->subpixel, st), dy = tent_filter(1/cam->subpixel, st);
// camera model
Vec d = cam->x * ( (cx + dx) / cam->w - 0.5 ) + cam->y * ( (cy + dy) / cam->h - 0.5 ) + cam->_z;
Vec p = cam->o + d*cam->focus;
// Vec o = cam->o; // turn off dof
Vec o = cam->o + (rnd(10.0, st)-5) * cam->x + (rnd(10.0, st)-5) * cam->y; // turn on dof
result[idx] = result[idx] + tracing(group, Ray(o, (p-o).normal()), st);
}
__global__ void kernelCombResult(Vec *subpixel, Vec *pixel, Camera *cam, int samp) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= cam->n_pixel) return;
Vec res = Vec();
F div = 1. / samp;
for (int i = 0; i < cam->subpixel2; i++) {
Vec sub = subpixel[idx * cam->subpixel2 + i] * div;
res = res + Vec(clamp(sub.x), clamp(sub.y), clamp(sub.z)) / cam->subpixel2;
}
pixel[idx] = res;
}
int ceil_div(int x, int y) {
return (x + y - 1) / y;
}
int main(int argc, char *argv[]) {
printf("initial begin\n");
Scene scene;
printf("load scene finished\n");
Camera *cam;
cudaMalloc((void**)&cam, sizeof(Camera));
cudaMemcpy(cam, scene.cam, sizeof(Camera), cudaMemcpyHostToDevice); // cpu -> gpu
Group *group = scene.group->to(); // cpu -> gpu
printf("initial end\n");
// { // debug block
// debug<<<dim3(1), dim3(1)>>>(group);
// gpuErrchk( cudaDeviceSynchronize() );
// printf("debug test pass\n");
// }
curandState *states;
Vec *sub_result;
Vec *pixel_result;
cudaMalloc((void**)&states, scene.cam->n_sub*sizeof(curandState));
cudaMalloc((void**)&sub_result, scene.cam->n_sub*sizeof(Vec));
cudaMalloc((void**)&pixel_result, scene.cam->n_pixel*sizeof(Vec));
dim3 blockDim(blocksize);
dim3 gridDim1(ceil_div(scene.cam->n_sub, blocksize));
dim3 gridDim2(ceil_div(scene.cam->n_pixel, blocksize));
init<<<gridDim1, blockDim>>>(cam, sub_result, states);
for (int samp = 1; samp <= scene.cam->samps; ++samp) {
fprintf(stderr, "\rrendering %6d of %d", samp, scene.cam->samps);
kernelRayTrace<<<gridDim1, blockDim>>>(group, cam, sub_result, states);
gpuErrchk( cudaDeviceSynchronize() ); // wait all
kernelCombResult<<<gridDim2, blockDim>>>(sub_result, pixel_result, cam, samp);
gpuErrchk( cudaDeviceSynchronize() ); // wait all
if (samp % 100 == 0 || samp == scene.cam->samps-1) {
Vec *img = new RGB[scene.cam->n_pixel];
cudaMemcpy(img, pixel_result, scene.cam->n_pixel*sizeof(Vec), cudaMemcpyDeviceToHost); // gpu to cpu
FILE *f = fopen("image.ppm", "w");
fprintf(f, "P3\n%d %d\n%d\n", scene.cam->w, scene.cam->h, 255);
for (int i = 0; i < scene.cam->n_pixel; i++) {
fprintf(f, "%d %d %d ", toInt(img[i].x), toInt(img[i].y), toInt(img[i].z));
}
fclose(f);
delete[] img;
}
}
cudaFree(states);
cudaFree(sub_result);
cudaFree(pixel_result);
cudaFree(group);
cudaFree(cam);
return 0;
}
|
e6d83071d7766eeb92dc34ca75cdf8b08c5ce349.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudaRTCommon.h"
#define APPROX_TRACE
#define BLOCK_SIZE 16
#define NORMALRAY_BOUND_MAX 3
namespace cudaRTPTApprox
{
unsigned char g_paraCheckSum;
NPAttrHelper::Attrib g_traceLimitDepth0("TraceLimitDepth0", -1);
NPAttrHelper::Attrib g_traceLimitTime0("TraceLimitTime0", BVH_TRACE_MAX);
NPAttrHelper::Attrib g_traceLimitDepth1("TraceLimitDepth1", -1);
NPAttrHelper::Attrib g_traceLimitTime1("TraceLimitTime1", BVH_TRACE_MAX);
NPAttrHelper::Attrib g_traceLimitDepth2("TraceLimitDepth2", -1);
NPAttrHelper::Attrib g_traceLimitTime2("TraceLimitTime2", BVH_TRACE_MAX);
NPAttrHelper::Attrib g_envLightR("EnvLightR", 1.f);
NPAttrHelper::Attrib g_envLightG("EnvLightG", 1.f);
NPAttrHelper::Attrib g_envLightB("EnvLightB", 1.f);
CUDA_RT_COMMON_ATTRIBS_N(9)
CUDA_RT_COMMON_ATTRIBS_BGN
CUDA_RT_COMMON_ATTRIB_DECLARE(0, TraceLimitDepth0, g_traceLimitDepth0)
CUDA_RT_COMMON_ATTRIB_DECLARE(1, TraceLimitTime0, g_traceLimitTime0)
CUDA_RT_COMMON_ATTRIB_DECLARE(2, TraceLimitDepth1, g_traceLimitDepth1)
CUDA_RT_COMMON_ATTRIB_DECLARE(3, TraceLimitTime1, g_traceLimitTime1)
CUDA_RT_COMMON_ATTRIB_DECLARE(4, TraceLimitDepth2, g_traceLimitDepth2)
CUDA_RT_COMMON_ATTRIB_DECLARE(5, TraceLimitTime2, g_traceLimitTime2)
CUDA_RT_COMMON_ATTRIB_DECLARE(6, EnvLightR, g_envLightR)
CUDA_RT_COMMON_ATTRIB_DECLARE(7, EnvLightG, g_envLightG)
CUDA_RT_COMMON_ATTRIB_DECLARE(8, EnvLightB, g_envLightB)
CUDA_RT_COMMON_ATTRIBS_END
struct Parameters
{
uint32 traceLimitDepth0;
uint32 traceLimitDepth1;
uint32 traceLimitDepth2;
uint32 traceLimitTime0;
uint32 traceLimitTime1;
uint32 traceLimitTime2;
float3 envLight;
};
float* g_devResultData = nullptr;
float* g_devAccResultData = nullptr;
NPMathHelper::Mat4x4 g_matLastCamMat;
NPMathHelper::Mat4x4 g_matCurCamMat;
uint32 g_uCurFrameN;
size_t g_resultDataSize = 0;
struct ShootRayResult
{
float3 light;
};
template <int depth = 0>
__device__ ShootRayResult pt0_normalRay(const Parameters para, const CURay& ray, RTVertex* vertices, RTTriangle* triangles
, RTMaterial* materials, CURTTexture* textures, hiprandState_t *randstate, const uint frameN)
{
ShootRayResult rayResult;
if (depth > 5)
{
rayResult.light = make_float3(0.f, 0.f, 0.f);
return rayResult;
}
TracePrimitiveResult traceResult;
#ifdef APPROX_TRACE
uint32 traceMaxTime = BVH_TRACE_MAX;
uint32 traceMaxDepth = -1;
if(depth == 0)
{
traceMaxTime = para.traceLimitTime0;
traceMaxDepth = para.traceLimitDepth0;
}
else if (depth == 1)
{
traceMaxTime = para.traceLimitTime1;
traceMaxDepth = para.traceLimitDepth1;
}
else
{
traceMaxTime = para.traceLimitTime2;
traceMaxDepth = para.traceLimitDepth2;
}
if (TracePrimitiveWApprox(ray, traceResult, randstate, frameN, M_INF, M_FLT_BIAS_EPSILON, false, traceMaxTime, traceMaxDepth))
#else
if (TracePrimitive(ray, traceResult, M_INF, M_FLT_BIAS_EPSILON, false))
#endif
{
RTTriangle* tri = &triangles[traceResult.triId];
RTMaterial* mat = &materials[tri->matInd];
RTVertex* v0 = &vertices[tri->vertInd0];
RTVertex* v1 = &vertices[tri->vertInd1];
RTVertex* v2 = &vertices[tri->vertInd2];
float2 uv0 = make_float2(v0->tex._x, v0->tex._y);
float2 uv1 = make_float2(v1->tex._x, v1->tex._y);
float2 uv2 = make_float2(v2->tex._x, v2->tex._y);
float2 uv = uv0 * traceResult.w + uv1 * traceResult.u + uv2 * traceResult.v;
float3 n0 = V32F3(v0->norm);
float3 n1 = V32F3(v1->norm);
float3 n2 = V32F3(v2->norm);
float3 norm = n0 * traceResult.w + n1 * traceResult.u + n2 * traceResult.v;
float3 diff;
float3 emissive;
float trans;
float specular;
float metallic;
float roughness;
float anisotropic;
float sheen;
float sheenTint;
float clearcoat;
float clearcoatGloss;
GetMaterialColors(mat, uv, textures, diff, norm, emissive, trans, specular, metallic, roughness
, anisotropic, sheen, sheenTint, clearcoat, clearcoatGloss);
float3 shadeResult = make_float3(0.f,0.f,0.f);
float3 nl = vecDot(norm, ray.dir) < 0.f ? norm : -1 * norm;
#define MICROFACET_MODEL
#ifdef MICROFACET_MODEL
{
// Sample a random microfacet half-vector via GGX importance sampling
float3 hDir = ImportanceSampleGGX(make_float2(hiprand_uniform(randstate), hiprand_uniform(randstate)), roughness, nl);
// Calculate the Fresnel term
float voH = vecDot(-1 * ray.dir, hDir);
float3 f0 = vecLerp(0.08 * make_float3(specular, specular, specular), diff, metallic);
float3 brdf_f = Fresnel(f0, voH);
// Reflected or Refracted
float reflProb = lerp(length(brdf_f), 1.0f, metallic);
float refrProb = trans;
float3 reflDir;
float3 refrDir;
if (refrProb > 0)
{
bool into = vecDot(nl, norm) > 0.f;
float nt = specular * 0.8f + 1.f;
float nc = 1.0f;
float nnt = into ? nc / nt : nt / nc;
float ddn = vecDot(hDir, ray.dir);
float cos2t = 1.f - nnt * nnt *(1.f - ddn * ddn);
if (cos2t < 0.f)
{
refrProb = 0.f;
}
else
{
refrDir = normalize(ray.dir * nnt - hDir * (ddn*nnt + sqrtf(cos2t)));
}
}
if (reflProb > 0)
{
reflDir = normalize(ray.dir - hDir * 2 * vecDot(hDir,ray.dir));
if (vecDot(reflDir, nl) < 0.f)
reflProb = 0.f;
}
// Reflected
if (reflProb > 0 && hiprand_uniform(randstate) < reflProb)
{
CURay nextRay(ray.orig + (traceResult.dist - M_FLT_BIAS_EPSILON) * ray.dir, reflDir);
ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(para, nextRay, vertices, triangles, materials, textures, randstate, frameN);
// Microfacet specular = D*G*F / (4*NoL*NoV)
// pdf = D * NoH / (4 * VoH)
// (G * F * VoH) / (NoV * NoH)
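// dividing the specular BRDF (times NoL) by that pdf cancels the D terms, leaving
// (G * F * VoH) / (NoV * NoH); the extra division by reflProb below accounts for the
// probability of having taken the reflection branch.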
float VoH = vecDot(-1 * ray.dir, hDir);
float NoV = vecDot(nl, -1 * ray.dir);
float NoH = vecDot(nl, hDir);
float NoL = vecDot(nl, reflDir);
float G = GeometricVisibility(roughness, NoV, NoL, VoH);
shadeResult = vecMul((brdf_f * G * VoH) / (NoV * NoH * reflProb) , nextRayResult.light) + emissive;
}
// Diffused or Transmitted
else
{
// Transmitted
if (refrProb > 0 && hiprand_uniform(randstate) < refrProb)
{
CURay nextRay(ray.orig + (traceResult.dist + M_FLT_BIAS_EPSILON) * ray.dir + refrDir * M_FLT_BIAS_EPSILON, refrDir);
ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(para, nextRay, vertices, triangles, materials, textures, randstate, frameN);
float cosine = vecDot(-1 * nl, refrDir);
shadeResult = (cosine * vecMul(diff, nextRayResult.light)) / (refrProb * (1 - reflProb)) + emissive;
}
// Diffused
else
{
float3 w = nl;
float3 u = normalize(vecCross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w));
float3 v = vecCross(w, u);
u = vecCross(v, w);
float r1 = 2.f * M_PI * hiprand_uniform(randstate);
float r2cos = sqrtf(hiprand_uniform(randstate));
float r2sin = 1.f - r2cos*r2cos;
float3 diffDir = normalize(w * r2cos + u * r2sin * cosf(r1) + v * r2sin * sinf(r1));
CURay nextRay(ray.orig + traceResult.dist * ray.dir + diffDir * M_FLT_BIAS_EPSILON, diffDir);
ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(para, nextRay, vertices, triangles, materials, textures, randstate, frameN);
float VoH = vecDot(-1 * ray.dir, hDir);
float NoV = vecDot(nl, -1 * ray.dir);
float NoL = vecDot(nl, diffDir);
shadeResult = (M_PI * vecMul(Diffuse(diff, roughness, NoV, NoL, VoH), nextRayResult.light)) / ((1 - refrProb) * (1 - reflProb)) + emissive;
}
}
}
#else
float refrProb = hiprand_uniform(randstate);
float3 refrDir = ray.dir;
if (refrProb < trans)
{
bool into = vecDot(nl, norm) > 0.f;
float nt = specular * 0.8f + 1.f;
float nc = 1.0f;
float nnt = into ? nc / nt : nt / nc;
float ddn = vecDot(nl, ray.dir);
float cos2t = 1.f - nnt * nnt *(1.f - ddn * ddn);
if (cos2t < 0.f)
{
float3 refDir = normalize(ray.dir - norm * 2 * vecDot(norm, ray.dir));
CURay nextRay(ray.orig + traceResult.dist * ray.dir + refDir * M_FLT_BIAS_EPSILON, refDir);
ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(para, nextRay, vertices, triangles, materials, textures, randstate, frameN);
float cosine = vecDot(nl, refDir);
shadeResult = (cosine * nextRayResult.light) / trans + emissive;
}
else
{
refrDir = normalize(ray.dir * nnt - norm * ((into ? 1 : -1)*(ddn*nnt + sqrtf(cos2t))));
float a = nt - nc;
float b = nt + nc;
float r0 = a * a / (b * b);
float c = 1.f - (into ? -ddn : vecDot(refrDir, norm));
float re = r0 + (1.f - r0)*c*c*c*c*c;
float tr = 1.f - re;
float p = re;
float reflProb = hiprand_uniform(randstate);
if (reflProb < p)
{
float3 refDir = normalize(ray.dir - norm * 2 * vecDot(norm, ray.dir));
CURay nextRay(ray.orig + traceResult.dist * ray.dir + refDir * M_FLT_BIAS_EPSILON, refDir);
ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(para, nextRay, vertices, triangles, materials, textures, randstate, frameN);
float cosine = vecDot(nl, refDir);
shadeResult = (re * cosine * nextRayResult.light) / (trans * p) + emissive;
}
else
{
CURay nextRay(ray.orig + (traceResult.dist + M_FLT_BIAS_EPSILON) * ray.dir + refrDir * M_FLT_BIAS_EPSILON, refrDir);
ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(para, nextRay, vertices, triangles, materials, textures, randstate, frameN);
float cosine = vecDot(-1 * nl, refrDir);
shadeResult = (tr * cosine * nextRayResult.light) / (trans * (1.f - p)) + emissive;
}
}
}
else
{
float3 w = nl;
float3 u = normalize(vecCross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w));
float3 v = vecCross(w, u);
u = vecCross(v, w);
//float r1 = 2.f * M_PI * hiprand_uniform(randstate);
//float r2 = hiprand_uniform(randstate);
//float r2s = sqrtf(r2);
//float3 refDir = normalize(u*cosf(r1)*r2s + v*sinf(r1)*r2s + w*sqrtf(1.f - r2));
float r1 = 2.f * M_PI * hiprand_uniform(randstate);
float r2 = 0.5f * M_PI * hiprand_uniform(randstate);
float r2sin = sinf(r2);
float3 refDir = normalize(w * cosf(r2) + u * r2sin * cosf(r1) + v * r2sin * sinf(r1));
CURay nextRay(ray.orig + traceResult.dist * ray.dir + refDir * M_FLT_BIAS_EPSILON, refDir);
ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(para, nextRay, vertices, triangles, materials, textures, randstate, frameN);
nextRayResult.light = nextRayResult.light;
float cosine = vecDot(nl, refDir);
shadeResult = (M_PI * cosine * vecMul(diff, nextRayResult.light)) / (1 - trans) + emissive;
}
#endif
rayResult.light = shadeResult;
}
else
{
rayResult.light = para.envLight;
}
return rayResult;
}
template <>
__device__ ShootRayResult pt0_normalRay<NORMALRAY_BOUND_MAX>(const Parameters para, const CURay& ray, RTVertex* vertices, RTTriangle* triangles, RTMaterial* materials, CURTTexture* textures
, hiprandState_t *randstate, const uint frameN)
{
ShootRayResult rayResult;
rayResult.light.x = rayResult.light.y = rayResult.light.z = 0.f;
return rayResult;
}
uint32 WangHash(uint32 a) {
a = (a ^ 61) ^ (a >> 16);
a = a + (a << 3);
a = a ^ (a >> 4);
a = a * 0x27d4eb2d;
a = a ^ (a >> 15);
return a;
}
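// WangHash scrambles the frame counter before it is used to seed hiprand_init in the
// kernel below, so consecutive frame numbers seed the generator with well-separated values.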
__global__ void pt0_kernel(const Parameters para, float3 camPos, float3 camDir, float3 camUp, float3 camRight, float fov,
float width, float height, RTVertex* vertices, RTTriangle* triangles, RTMaterial* materials, CURTTexture* textures
, uint32 frameN, uint32 hashedFrameN, float* result, float* accResult)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
uint ind = (y * width + x) * 3;
int threadId = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
float u = (2.f * ((float)x + 0.5f) / width - 1.f) * tan(fov * 0.5f) * width / height;
float v = (2.f * ((float)y + 0.5f) / height - 1.f) * tan(fov * 0.5f);
hiprandState_t randstate;
hiprand_init(hashedFrameN + ind, 0, 0, &randstate);
u = u + (hiprand_uniform(&randstate) - 0.5f) / width;
v = v + (hiprand_uniform(&randstate) - 0.5f) / height;
float3 dir = normalize(camRight * u + camUp * v + camDir);
CURay ray(camPos, dir);
ShootRayResult rayResult = pt0_normalRay(para, ray, vertices, triangles, materials, textures, &randstate, frameN);
float resultInf = 1.f / (float)(frameN + 1);
float oldInf = 1.f - resultInf;
result[ind] = max(resultInf * rayResult.light.x + oldInf * result[ind], 0.f);
result[ind + 1] = max(resultInf * rayResult.light.y + oldInf * result[ind + 1], 0.f);
result[ind + 2] = max(resultInf * rayResult.light.z + oldInf * result[ind + 2], 0.f);
}
void CleanMem()
{
freeAllBVHCudaMem();
CUFREE(g_devResultData);
CUFREE(g_devAccResultData);
}
bool Render(NPMathHelper::Vec3 camPos, NPMathHelper::Vec3 camDir, NPMathHelper::Vec3 camUp, float fov, RTScene* scene
, float width, float height, float* result)
{
// Check and allocate everything
if (!scene || !scene->GetCompactBVH()->IsValid())
return false;
NPMathHelper::Vec3 camRight = camDir.cross(camUp).normalize();
camUp = camRight.cross(camDir).normalize();
g_matLastCamMat = g_matCurCamMat;
g_matCurCamMat = NPMathHelper::Mat4x4::lookAt(camPos, camPos + camDir, camUp);
g_uCurFrameN = (g_matLastCamMat != g_matCurCamMat) ? 0 : g_uCurFrameN + 1;
unsigned char checksum = *(g_traceLimitDepth0.GetUint()) ^ *(g_traceLimitDepth1.GetUint()) ^ *(g_traceLimitDepth2.GetUint())
^ *(g_traceLimitTime0.GetUint()) ^ *(g_traceLimitTime1.GetUint()) ^ *(g_traceLimitTime2.GetUint())
^ (uint32)(*(g_envLightR.GetFloat()) * 10000.f) ^ (uint32)(*(g_envLightG.GetFloat()) * 10000.f) ^ (uint32)(*(g_envLightB.GetFloat()) * 10000.f);
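// Any change to the trace-limit or environment-light attributes changes this checksum,
// which forces a scene re-initialization and restarts the progressive accumulation.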
if (!g_bIsCudaInit || scene->GetIsCudaDirty() || g_paraCheckSum != checksum)
{
g_paraCheckSum = checksum;
g_matLastCamMat = g_matCurCamMat;
g_uCurFrameN = 0;
initAllSceneCudaMem(scene);
}
else if (scene->GetIsCudaMaterialDirty())
{
updateAllSceneMaterialsCudaMem(scene);
g_uCurFrameN = 0;
}
if (!g_bIsCudaInit)
return false;
if (!g_devResultData || !g_devAccResultData || g_resultDataSize != (sizeof(float) * 3 * width * height))
{
g_resultDataSize = sizeof(float) * 3 * width * height;
CUFREE(g_devResultData);
hipMalloc((void**)&g_devResultData, g_resultDataSize);
CUFREE(g_devAccResultData);
hipMalloc((void**)&g_devAccResultData, g_resultDataSize);
}
float3 f3CamPos = V32F3(camPos);
float3 f3CamUp = V32F3(camUp);
float3 f3CamDir = V32F3(camDir);
float3 f3CamRight = V32F3(camRight);
Parameters para;
para.traceLimitDepth0 = *g_traceLimitDepth0.GetUint();
para.traceLimitDepth1 = *g_traceLimitDepth1.GetUint();
para.traceLimitDepth2 = *g_traceLimitDepth2.GetUint();
para.traceLimitTime0 = *g_traceLimitTime0.GetUint();
para.traceLimitTime1 = *g_traceLimitTime1.GetUint();
para.traceLimitTime2 = *g_traceLimitTime2.GetUint();
para.envLight.x = *g_envLightR.GetFloat();
para.envLight.y = *g_envLightG.GetFloat();
para.envLight.z = *g_envLightB.GetFloat();
// Kernel launch goes here
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 grid(width / block.x, height / block.y, 1);
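// Note: this division truncates, so it assumes width and height are multiples of BLOCK_SIZE;
// otherwise the rightmost/bottom partial tile is never launched (the kernel has no bounds check).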
pt0_kernel << < grid, block >> > (para, f3CamPos, f3CamDir, f3CamUp, f3CamRight, fov, width, height, g_devVertices, g_devTriangles, g_devMaterials, g_devTextures
, g_uCurFrameN, WangHash(g_uCurFrameN), g_devResultData, g_devAccResultData);
// Copy result to host
hipMemcpy(result, g_devResultData, g_resultDataSize, hipMemcpyDeviceToHost);
return true;
}
}
|
e6d83071d7766eeb92dc34ca75cdf8b08c5ce349.cu
|
#include "cudaRTCommon.h"
#define APPROX_TRACE
#define BLOCK_SIZE 16
#define NORMALRAY_BOUND_MAX 3
namespace cudaRTPTApprox
{
unsigned char g_paraCheckSum;
NPAttrHelper::Attrib g_traceLimitDepth0("TraceLimitDepth0", -1);
NPAttrHelper::Attrib g_traceLimitTime0("TraceLimitTime0", BVH_TRACE_MAX);
NPAttrHelper::Attrib g_traceLimitDepth1("TraceLimitDepth1", -1);
NPAttrHelper::Attrib g_traceLimitTime1("TraceLimitTime1", BVH_TRACE_MAX);
NPAttrHelper::Attrib g_traceLimitDepth2("TraceLimitDepth2", -1);
NPAttrHelper::Attrib g_traceLimitTime2("TraceLimitTime2", BVH_TRACE_MAX);
NPAttrHelper::Attrib g_envLightR("EnvLightR", 1.f);
NPAttrHelper::Attrib g_envLightG("EnvLightG", 1.f);
NPAttrHelper::Attrib g_envLightB("EnvLightB", 1.f);
CUDA_RT_COMMON_ATTRIBS_N(9)
CUDA_RT_COMMON_ATTRIBS_BGN
CUDA_RT_COMMON_ATTRIB_DECLARE(0, TraceLimitDepth0, g_traceLimitDepth0)
CUDA_RT_COMMON_ATTRIB_DECLARE(1, TraceLimitTime0, g_traceLimitTime0)
CUDA_RT_COMMON_ATTRIB_DECLARE(2, TraceLimitDepth1, g_traceLimitDepth1)
CUDA_RT_COMMON_ATTRIB_DECLARE(3, TraceLimitTime1, g_traceLimitTime1)
CUDA_RT_COMMON_ATTRIB_DECLARE(4, TraceLimitDepth2, g_traceLimitDepth2)
CUDA_RT_COMMON_ATTRIB_DECLARE(5, TraceLimitTime2, g_traceLimitTime2)
CUDA_RT_COMMON_ATTRIB_DECLARE(6, EnvLightR, g_envLightR)
CUDA_RT_COMMON_ATTRIB_DECLARE(7, EnvLightG, g_envLightG)
CUDA_RT_COMMON_ATTRIB_DECLARE(8, EnvLightB, g_envLightB)
CUDA_RT_COMMON_ATTRIBS_END
struct Parameters
{
uint32 traceLimitDepth0;
uint32 traceLimitDepth1;
uint32 traceLimitDepth2;
uint32 traceLimitTime0;
uint32 traceLimitTime1;
uint32 traceLimitTime2;
float3 envLight;
};
float* g_devResultData = nullptr;
float* g_devAccResultData = nullptr;
NPMathHelper::Mat4x4 g_matLastCamMat;
NPMathHelper::Mat4x4 g_matCurCamMat;
uint32 g_uCurFrameN;
size_t g_resultDataSize = 0;
struct ShootRayResult
{
float3 light;
};
template <int depth = 0>
__device__ ShootRayResult pt0_normalRay(const Parameters para, const CURay& ray, RTVertex* vertices, RTTriangle* triangles
, RTMaterial* materials, CURTTexture* textures, curandState *randstate, const uint frameN)
{
ShootRayResult rayResult;
if (depth > 5)
{
rayResult.light = make_float3(0.f, 0.f, 0.f);
return rayResult;
}
TracePrimitiveResult traceResult;
#ifdef APPROX_TRACE
uint32 traceMaxTime = BVH_TRACE_MAX;
uint32 traceMaxDepth = -1;
if(depth == 0)
{
traceMaxTime = para.traceLimitTime0;
traceMaxDepth = para.traceLimitDepth0;
}
else if (depth == 1)
{
traceMaxTime = para.traceLimitTime1;
traceMaxDepth = para.traceLimitDepth1;
}
else
{
traceMaxTime = para.traceLimitTime2;
traceMaxDepth = para.traceLimitDepth2;
}
if (TracePrimitiveWApprox(ray, traceResult, randstate, frameN, M_INF, M_FLT_BIAS_EPSILON, false, traceMaxTime, traceMaxDepth))
#else
if (TracePrimitive(ray, traceResult, M_INF, M_FLT_BIAS_EPSILON, false))
#endif
{
RTTriangle* tri = &triangles[traceResult.triId];
RTMaterial* mat = &materials[tri->matInd];
RTVertex* v0 = &vertices[tri->vertInd0];
RTVertex* v1 = &vertices[tri->vertInd1];
RTVertex* v2 = &vertices[tri->vertInd2];
float2 uv0 = make_float2(v0->tex._x, v0->tex._y);
float2 uv1 = make_float2(v1->tex._x, v1->tex._y);
float2 uv2 = make_float2(v2->tex._x, v2->tex._y);
float2 uv = uv0 * traceResult.w + uv1 * traceResult.u + uv2 * traceResult.v;
float3 n0 = V32F3(v0->norm);
float3 n1 = V32F3(v1->norm);
float3 n2 = V32F3(v2->norm);
float3 norm = n0 * traceResult.w + n1 * traceResult.u + n2 * traceResult.v;
float3 diff;
float3 emissive;
float trans;
float specular;
float metallic;
float roughness;
float anisotropic;
float sheen;
float sheenTint;
float clearcoat;
float clearcoatGloss;
GetMaterialColors(mat, uv, textures, diff, norm, emissive, trans, specular, metallic, roughness
, anisotropic, sheen, sheenTint, clearcoat, clearcoatGloss);
float3 shadeResult = make_float3(0.f,0.f,0.f);
float3 nl = vecDot(norm, ray.dir) < 0.f ? norm : -1 * norm;
#define MICROFACET_MODEL
#ifdef MICROFACET_MODEL
{
// Sample a random microfacet half-vector via GGX importance sampling
float3 hDir = ImportanceSampleGGX(make_float2(curand_uniform(randstate), curand_uniform(randstate)), roughness, nl);
// Calculate the Fresnel term
float voH = vecDot(-1 * ray.dir, hDir);
float3 f0 = vecLerp(0.08 * make_float3(specular, specular, specular), diff, metallic);
float3 brdf_f = Fresnel(f0, voH);
// Reflected or Refracted
float reflProb = lerp(length(brdf_f), 1.0f, metallic);
float refrProb = trans;
float3 reflDir;
float3 refrDir;
if (refrProb > 0)
{
bool into = vecDot(nl, norm) > 0.f;
float nt = specular * 0.8f + 1.f;
float nc = 1.0f;
float nnt = into ? nc / nt : nt / nc;
float ddn = vecDot(hDir, ray.dir);
float cos2t = 1.f - nnt * nnt *(1.f - ddn * ddn);
if (cos2t < 0.f)
{
refrProb = 0.f;
}
else
{
refrDir = normalize(ray.dir * nnt - hDir * (ddn*nnt + sqrtf(cos2t)));
}
}
if (reflProb > 0)
{
reflDir = normalize(ray.dir - hDir * 2 * vecDot(hDir,ray.dir));
if (vecDot(reflDir, nl) < 0.f)
reflProb = 0.f;
}
// Reflected
if (reflProb > 0 && curand_uniform(randstate) < reflProb)
{
CURay nextRay(ray.orig + (traceResult.dist - M_FLT_BIAS_EPSILON) * ray.dir, reflDir);
ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(para, nextRay, vertices, triangles, materials, textures, randstate, frameN);
// Microfacet specular = D*G*F / (4*NoL*NoV)
// pdf = D * NoH / (4 * VoH)
// (G * F * VoH) / (NoV * NoH)
float VoH = vecDot(-1 * ray.dir, hDir);
float NoV = vecDot(nl, -1 * ray.dir);
float NoH = vecDot(nl, hDir);
float NoL = vecDot(nl, reflDir);
float G = GeometricVisibility(roughness, NoV, NoL, VoH);
shadeResult = vecMul((brdf_f * G * VoH) / (NoV * NoH * reflProb) , nextRayResult.light) + emissive;
}
// Diffused or Transmitted
else
{
// Transmitted
if (refrProb > 0 && curand_uniform(randstate) < refrProb)
{
CURay nextRay(ray.orig + (traceResult.dist + M_FLT_BIAS_EPSILON) * ray.dir + refrDir * M_FLT_BIAS_EPSILON, refrDir);
ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(para, nextRay, vertices, triangles, materials, textures, randstate, frameN);
float cosine = vecDot(-1 * nl, refrDir);
shadeResult = (cosine * vecMul(diff, nextRayResult.light)) / (refrProb * (1 - reflProb)) + emissive;
}
// Diffused
else
{
float3 w = nl;
float3 u = normalize(vecCross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w));
float3 v = vecCross(w, u);
u = vecCross(v, w);
float r1 = 2.f * M_PI * curand_uniform(randstate);
float r2cos = sqrtf(curand_uniform(randstate));
float r2sin = 1.f - r2cos*r2cos;
float3 diffDir = normalize(w * r2cos + u * r2sin * cosf(r1) + v * r2sin * sinf(r1));
CURay nextRay(ray.orig + traceResult.dist * ray.dir + diffDir * M_FLT_BIAS_EPSILON, diffDir);
ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(para, nextRay, vertices, triangles, materials, textures, randstate, frameN);
float VoH = vecDot(-1 * ray.dir, hDir);
float NoV = vecDot(nl, -1 * ray.dir);
float NoL = vecDot(nl, diffDir);
shadeResult = (M_PI * vecMul(Diffuse(diff, roughness, NoV, NoL, VoH), nextRayResult.light)) / ((1 - refrProb) * (1 - reflProb)) + emissive;
}
}
}
#else
float refrProb = curand_uniform(randstate);
float3 refrDir = ray.dir;
if (refrProb < trans)
{
bool into = vecDot(nl, norm) > 0.f;
float nt = specular * 0.8f + 1.f;
float nc = 1.0f;
float nnt = into ? nc / nt : nt / nc;
float ddn = vecDot(nl, ray.dir);
float cos2t = 1.f - nnt * nnt *(1.f - ddn * ddn);
if (cos2t < 0.f)
{
float3 refDir = normalize(ray.dir - norm * 2 * vecDot(norm, ray.dir));
CURay nextRay(ray.orig + traceResult.dist * ray.dir + refDir * M_FLT_BIAS_EPSILON, refDir);
ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(para, nextRay, vertices, triangles, materials, textures, randstate, frameN);
float cosine = vecDot(nl, refDir);
shadeResult = (cosine * nextRayResult.light) / trans + emissive;
}
else
{
refrDir = normalize(ray.dir * nnt - norm * ((into ? 1 : -1)*(ddn*nnt + sqrtf(cos2t))));
float a = nt - nc;
float b = nt + nc;
float r0 = a * a / (b * b);
float c = 1.f - (into ? -ddn : vecDot(refrDir, norm));
float re = r0 + (1.f - r0)*c*c*c*c*c;
float tr = 1.f - re;
float p = re;
float reflProb = curand_uniform(randstate);
if (reflProb < p)
{
float3 refDir = normalize(ray.dir - norm * 2 * vecDot(norm, ray.dir));
CURay nextRay(ray.orig + traceResult.dist * ray.dir + refDir * M_FLT_BIAS_EPSILON, refDir);
ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(para, nextRay, vertices, triangles, materials, textures, randstate, frameN);
float cosine = vecDot(nl, refDir);
shadeResult = (re * cosine * nextRayResult.light) / (trans * p) + emissive;
}
else
{
CURay nextRay(ray.orig + (traceResult.dist + M_FLT_BIAS_EPSILON) * ray.dir + refrDir * M_FLT_BIAS_EPSILON, refrDir);
ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(para, nextRay, vertices, triangles, materials, textures, randstate, frameN);
float cosine = vecDot(-1 * nl, refrDir);
shadeResult = (tr * cosine * nextRayResult.light) / (trans * (1.f - p)) + emissive;
}
}
}
else
{
float3 w = nl;
float3 u = normalize(vecCross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w));
float3 v = vecCross(w, u);
u = vecCross(v, w);
//float r1 = 2.f * M_PI * curand_uniform(randstate);
//float r2 = curand_uniform(randstate);
//float r2s = sqrtf(r2);
//float3 refDir = normalize(u*cosf(r1)*r2s + v*sinf(r1)*r2s + w*sqrtf(1.f - r2));
float r1 = 2.f * M_PI * curand_uniform(randstate);
float r2 = 0.5f * M_PI * curand_uniform(randstate);
float r2sin = sinf(r2);
float3 refDir = normalize(w * cosf(r2) + u * r2sin * cosf(r1) + v * r2sin * sinf(r1));
CURay nextRay(ray.orig + traceResult.dist * ray.dir + refDir * M_FLT_BIAS_EPSILON, refDir);
ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(para, nextRay, vertices, triangles, materials, textures, randstate, frameN);
nextRayResult.light = nextRayResult.light;
float cosine = vecDot(nl, refDir);
shadeResult = (M_PI * cosine * vecMul(diff, nextRayResult.light)) / (1 - trans) + emissive;
}
#endif
rayResult.light = shadeResult;
}
else
{
rayResult.light = para.envLight;
}
return rayResult;
}
template <>
__device__ ShootRayResult pt0_normalRay<NORMALRAY_BOUND_MAX>(const Parameters para, const CURay& ray, RTVertex* vertices, RTTriangle* triangles, RTMaterial* materials, CURTTexture* textures
, curandState *randstate, const uint frameN)
{
ShootRayResult rayResult;
rayResult.light.x = rayResult.light.y = rayResult.light.z = 0.f;
return rayResult;
}
uint32 WangHash(uint32 a) {
a = (a ^ 61) ^ (a >> 16);
a = a + (a << 3);
a = a ^ (a >> 4);
a = a * 0x27d4eb2d;
a = a ^ (a >> 15);
return a;
}
__global__ void pt0_kernel(const Parameters para, float3 camPos, float3 camDir, float3 camUp, float3 camRight, float fov,
float width, float height, RTVertex* vertices, RTTriangle* triangles, RTMaterial* materials, CURTTexture* textures
, uint32 frameN, uint32 hashedFrameN, float* result, float* accResult)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
uint ind = (y * width + x) * 3;
int threadId = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
float u = (2.f * ((float)x + 0.5f) / width - 1.f) * tan(fov * 0.5f) * width / height;
float v = (2.f * ((float)y + 0.5f) / height - 1.f) * tan(fov * 0.5f);
curandState randstate;
curand_init(hashedFrameN + ind, 0, 0, &randstate);
u = u + (curand_uniform(&randstate) - 0.5f) / width;
v = v + (curand_uniform(&randstate) - 0.5f) / height;
float3 dir = normalize(camRight * u + camUp * v + camDir);
CURay ray(camPos, dir);
ShootRayResult rayResult = pt0_normalRay(para, ray, vertices, triangles, materials, textures, &randstate, frameN);
float resultInf = 1.f / (float)(frameN + 1);
float oldInf = 1.f - resultInf;
result[ind] = max(resultInf * rayResult.light.x + oldInf * result[ind], 0.f);
result[ind + 1] = max(resultInf * rayResult.light.y + oldInf * result[ind + 1], 0.f);
result[ind + 2] = max(resultInf * rayResult.light.z + oldInf * result[ind + 2], 0.f);
}
void CleanMem()
{
freeAllBVHCudaMem();
CUFREE(g_devResultData);
CUFREE(g_devAccResultData);
}
bool Render(NPMathHelper::Vec3 camPos, NPMathHelper::Vec3 camDir, NPMathHelper::Vec3 camUp, float fov, RTScene* scene
, float width, float height, float* result)
{
// Check and allocate everything
if (!scene || !scene->GetCompactBVH()->IsValid())
return false;
NPMathHelper::Vec3 camRight = camDir.cross(camUp).normalize();
camUp = camRight.cross(camDir).normalize();
g_matLastCamMat = g_matCurCamMat;
g_matCurCamMat = NPMathHelper::Mat4x4::lookAt(camPos, camPos + camDir, camUp);
g_uCurFrameN = (g_matLastCamMat != g_matCurCamMat) ? 0 : g_uCurFrameN + 1;
unsigned char checksum = *(g_traceLimitDepth0.GetUint()) ^ *(g_traceLimitDepth1.GetUint()) ^ *(g_traceLimitDepth2.GetUint())
^ *(g_traceLimitTime0.GetUint()) ^ *(g_traceLimitTime1.GetUint()) ^ *(g_traceLimitTime2.GetUint())
^ (uint32)(*(g_envLightR.GetFloat()) * 10000.f) ^ (uint32)(*(g_envLightG.GetFloat()) * 10000.f) ^ (uint32)(*(g_envLightB.GetFloat()) * 10000.f);
if (!g_bIsCudaInit || scene->GetIsCudaDirty() || g_paraCheckSum != checksum)
{
g_paraCheckSum = checksum;
g_matLastCamMat = g_matCurCamMat;
g_uCurFrameN = 0;
initAllSceneCudaMem(scene);
}
else if (scene->GetIsCudaMaterialDirty())
{
updateAllSceneMaterialsCudaMem(scene);
g_uCurFrameN = 0;
}
if (!g_bIsCudaInit)
return false;
if (!g_devResultData || !g_devAccResultData || g_resultDataSize != (sizeof(float) * 3 * width * height))
{
g_resultDataSize = sizeof(float) * 3 * width * height;
CUFREE(g_devResultData);
cudaMalloc((void**)&g_devResultData, g_resultDataSize);
CUFREE(g_devAccResultData);
cudaMalloc((void**)&g_devAccResultData, g_resultDataSize);
}
float3 f3CamPos = V32F3(camPos);
float3 f3CamUp = V32F3(camUp);
float3 f3CamDir = V32F3(camDir);
float3 f3CamRight = V32F3(camRight);
Parameters para;
para.traceLimitDepth0 = *g_traceLimitDepth0.GetUint();
para.traceLimitDepth1 = *g_traceLimitDepth1.GetUint();
para.traceLimitDepth2 = *g_traceLimitDepth2.GetUint();
para.traceLimitTime0 = *g_traceLimitTime0.GetUint();
para.traceLimitTime1 = *g_traceLimitTime1.GetUint();
para.traceLimitTime2 = *g_traceLimitTime2.GetUint();
para.envLight.x = *g_envLightR.GetFloat();
para.envLight.y = *g_envLightG.GetFloat();
para.envLight.z = *g_envLightB.GetFloat();
// Kernel launch goes here
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 grid(width / block.x, height / block.y, 1);
pt0_kernel << < grid, block >> > (para, f3CamPos, f3CamDir, f3CamUp, f3CamRight, fov, width, height, g_devVertices, g_devTriangles, g_devMaterials, g_devTextures
, g_uCurFrameN, WangHash(g_uCurFrameN), g_devResultData, g_devAccResultData);
// Copy result to host
cudaMemcpy(result, g_devResultData, g_resultDataSize, cudaMemcpyDeviceToHost);
return true;
}
}
|
a6c09ff2e0b7d531cbb8cf44916b768c719c463c.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMasked.cu"
#else
THC_API void
THCTensor_(maskedFill)(THCState* state,
THCTensor *tensor, THCudaByteTensor *mask, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, mask));
THArgCheck(THCTensor_(nElement)(state, tensor) ==
THCudaByteTensor_nElement(state, mask),
2, "sizes do not match");
if (!THC_pointwiseApply2(state, tensor, mask,
TensorMaskedFillOp<real, unsigned char>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(maskedFillByte)(THCState* state,
THCTensor *tensor, THByteTensor *mask, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor));
THLongStorage* maskSizes = THByteTensor_newSizeOf(mask);
THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, maskSizes, NULL);
THLongStorage_free(maskSizes);
THCudaByteTensor_copyByte(state, maskCuda, mask);
THCTensor_(maskedFill)(state, tensor, maskCuda, value);
THCudaByteTensor_free(state, maskCuda);
}
THC_API void
THCTensor_(maskedCopy)(THCState* state,
THCTensor *tensor, THCudaByteTensor *mask, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, tensor, src, mask));
ptrdiff_t maskSize = THCudaByteTensor_nElement(state, mask);
ptrdiff_t tensorSize = THCTensor_(nElement)(state, tensor);
ptrdiff_t srcSize = THCTensor_(nElement)(state, src);
// `mask` and `tensor` must have the same number of elements
THArgCheck(maskSize == tensorSize, 2,
"mask and tensor must have the same number of elements");
// Determine our output size
ptrdiff_t totalElements = THCudaByteTensor_sumall(state, mask);
// The number of `1` elements present in the mask must be <= the
// number of elements available in `src`
if (totalElements > srcSize) {
THArgCheck(false, 2, "source nElements must be == mask `1` elements");
}
// FIXME: there appears to be a bug in Thrust (CUDA 7.0) for mixed
// iterator prefix sums? Convert `mask` to the same datatype as what
// we're accumulating the prefix sum in (int64_t) to get around it
THCudaLongTensor* maskLong = THCudaLongTensor_new(state);
THLongStorage* maskSizes = THCudaByteTensor_newSizeOf(state, mask);
THCudaLongTensor_resize(state, maskLong, maskSizes, NULL);
THCudaLongTensor_copyCudaByte(state, maskLong, mask);
// Use a prefix sum to determine the output locations of the masked elements
THCudaLongTensor* maskPrefixSum = THCudaLongTensor_new(state);
THCudaLongTensor_resize(state, maskPrefixSum, maskSizes, NULL);
THLongStorage_free(maskSizes);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<int64_t>
maskData(THCudaLongTensor_data(state, maskLong));
thrust::device_ptr<int64_t>
maskPrefixSumData(THCudaLongTensor_data(state, maskPrefixSum));
thrust::exclusive_scan(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
maskData,
maskData + THCudaLongTensor_nElement(state, maskLong),
maskPrefixSumData);
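// e.g. mask = [1, 0, 1, 1] gives maskPrefixSum = [0, 1, 1, 2]; position 0 of `tensor`
// then reads src[0], position 2 reads src[1] and position 3 reads src[2].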
// We are getting elements from `src` based on an offset from
// `maskPrefixSum`, so that should be made contiguous too
THCTensor* contigSrc = THCTensor_(newContiguous)(state, src);
// update `tensor` where `mask` == 1 but pull from `src` at
// maskPrefixSum
bool status = THC_pointwiseApply3(
state, tensor, mask, maskPrefixSum,
TensorMaskedCopyOp<real, unsigned char, int64_t>(
THCTensor_(data)(state, contigSrc)));
THCTensor_(free)(state, contigSrc);
THCudaLongTensor_free(state, maskLong);
THCudaLongTensor_free(state, maskPrefixSum);
THArgCheck(status, 2, CUTORCH_DIM_WARNING);
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(maskedCopyByte)(THCState* state,
THCTensor *tensor, THByteTensor *mask, THCTensor *src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
THLongStorage* maskSizes = THByteTensor_newSizeOf(mask);
THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, maskSizes, NULL);
THLongStorage_free(maskSizes);
THCudaByteTensor_copyByte(state, maskCuda, mask);
THCTensor_(maskedCopy)(state, tensor, maskCuda, src);
THCudaByteTensor_free(state, maskCuda);
}
THC_API void
THCTensor_(maskedSelect)(THCState* state,
THCTensor* tensor, THCTensor* src, THCudaByteTensor* mask) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, tensor, src, mask));
THArgCheck(THCudaByteTensor_nElement(state, mask) ==
THCTensor_(nElement)(state, src),
2, "sizes do not match");
// Determine our output size
ptrdiff_t totalElements = THCudaByteTensor_sumall(state, mask);
THCTensor* tensorContig = THCTensor_(newContiguous)(state, tensor);
THCTensor_(resize1d)(state, tensorContig, totalElements);
if (tensor != tensorContig) {
THCTensor_(resize1d)(state, tensor, totalElements);
}
// FIXME: there appears to be a bug in Thrust (CUDA 7.0) for mixed
// iterator prefix sums? Convert `mask` to the same datatype as what
// we're accumulating the prefix sum in (int64_t) to get around it
THCudaLongTensor* maskLong = THCudaLongTensor_new(state);
THLongStorage* maskSizes = THCudaByteTensor_newSizeOf(state, mask);
THCudaLongTensor_resize(state, maskLong, maskSizes, NULL);
THCudaLongTensor_copyCudaByte(state, maskLong, mask);
// Use a prefix sum to determine the output locations of the masked elements
THCudaLongTensor* maskPrefixSum = THCudaLongTensor_new(state);
THCudaLongTensor_resize(state, maskPrefixSum, maskSizes, NULL);
THLongStorage_free(maskSizes);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<int64_t>
maskData(THCudaLongTensor_data(state, maskLong));
thrust::device_ptr<int64_t>
maskPrefixSumData(THCudaLongTensor_data(state, maskPrefixSum));
thrust::exclusive_scan(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
maskData,
maskData + THCudaLongTensor_nElement(state, maskLong),
maskPrefixSumData);
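// Here the prefix sum plays the opposite role: for every position where mask == 1 it
// gives the destination index in the compacted output `tensor` of length totalElements.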
// Then copy over the masked elements at their desired output index
bool status = THC_pointwiseApply3(
state, mask, maskPrefixSum,
src, TensorMaskedSelectOp<real, unsigned char, int64_t>(
THCTensor_(data)(state, tensor)));
THCudaLongTensor_free(state, maskLong);
THCudaLongTensor_free(state, maskPrefixSum);
if (tensor != tensorContig) {
THCTensor_(freeCopyTo)(state, tensorContig, tensor);
} else {
THCTensor_(free)(state, tensorContig);
}
THArgCheck(status, 2, CUTORCH_DIM_WARNING);
THCudaCheck(hipGetLastError());
}
// FIXME: remove now that we have THCudaByteTensor?
THC_API void
THCTensor_(maskedSelectByte)(THCState* state,
THCTensor *tensor, THCTensor *src, THByteTensor *mask)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
THLongStorage* maskSizes = THByteTensor_newSizeOf(mask);
THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, maskSizes, NULL);
THLongStorage_free(maskSizes);
THCudaByteTensor_copyByte(state, maskCuda, mask);
THCTensor_(maskedSelect)(state, tensor, src, maskCuda);
THCudaByteTensor_free(state, maskCuda);
}
#endif
|
a6c09ff2e0b7d531cbb8cf44916b768c719c463c.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMasked.cu"
#else
THC_API void
THCTensor_(maskedFill)(THCState* state,
THCTensor *tensor, THCudaByteTensor *mask, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, mask));
THArgCheck(THCTensor_(nElement)(state, tensor) ==
THCudaByteTensor_nElement(state, mask),
2, "sizes do not match");
if (!THC_pointwiseApply2(state, tensor, mask,
TensorMaskedFillOp<real, unsigned char>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(maskedFillByte)(THCState* state,
THCTensor *tensor, THByteTensor *mask, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor));
THLongStorage* maskSizes = THByteTensor_newSizeOf(mask);
THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, maskSizes, NULL);
THLongStorage_free(maskSizes);
THCudaByteTensor_copyByte(state, maskCuda, mask);
THCTensor_(maskedFill)(state, tensor, maskCuda, value);
THCudaByteTensor_free(state, maskCuda);
}
THC_API void
THCTensor_(maskedCopy)(THCState* state,
THCTensor *tensor, THCudaByteTensor *mask, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, tensor, src, mask));
ptrdiff_t maskSize = THCudaByteTensor_nElement(state, mask);
ptrdiff_t tensorSize = THCTensor_(nElement)(state, tensor);
ptrdiff_t srcSize = THCTensor_(nElement)(state, src);
// `mask` and `tensor` must have the same number of elements
THArgCheck(maskSize == tensorSize, 2,
"mask and tensor must have the same number of elements");
// Determine our output size
ptrdiff_t totalElements = THCudaByteTensor_sumall(state, mask);
// The number of `1` elements present in the mask must be <= the
// number of elements available in `src`
if (totalElements > srcSize) {
THArgCheck(false, 2, "source nElements must be == mask `1` elements");
}
// FIXME: there appears to be a bug in Thrust (CUDA 7.0) for mixed
// iterator prefix sums? Convert `mask` to the same datatype as what
// we're accumulating the prefix sum in (int64_t) to get around it
THCudaLongTensor* maskLong = THCudaLongTensor_new(state);
THLongStorage* maskSizes = THCudaByteTensor_newSizeOf(state, mask);
THCudaLongTensor_resize(state, maskLong, maskSizes, NULL);
THCudaLongTensor_copyCudaByte(state, maskLong, mask);
// Use a prefix sum to determine the output locations of the masked elements
THCudaLongTensor* maskPrefixSum = THCudaLongTensor_new(state);
THCudaLongTensor_resize(state, maskPrefixSum, maskSizes, NULL);
THLongStorage_free(maskSizes);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<int64_t>
maskData(THCudaLongTensor_data(state, maskLong));
thrust::device_ptr<int64_t>
maskPrefixSumData(THCudaLongTensor_data(state, maskPrefixSum));
thrust::exclusive_scan(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
maskData,
maskData + THCudaLongTensor_nElement(state, maskLong),
maskPrefixSumData);
// We are getting elements from `src` based on an offset from
// `maskPrefixSum`, so that should be made contiguous too
THCTensor* contigSrc = THCTensor_(newContiguous)(state, src);
// update `tensor` where `mask` == 1 but pull from `src` at
// maskPrefixSum
bool status = THC_pointwiseApply3(
state, tensor, mask, maskPrefixSum,
TensorMaskedCopyOp<real, unsigned char, int64_t>(
THCTensor_(data)(state, contigSrc)));
THCTensor_(free)(state, contigSrc);
THCudaLongTensor_free(state, maskLong);
THCudaLongTensor_free(state, maskPrefixSum);
THArgCheck(status, 2, CUTORCH_DIM_WARNING);
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(maskedCopyByte)(THCState* state,
THCTensor *tensor, THByteTensor *mask, THCTensor *src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
THLongStorage* maskSizes = THByteTensor_newSizeOf(mask);
THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, maskSizes, NULL);
THLongStorage_free(maskSizes);
THCudaByteTensor_copyByte(state, maskCuda, mask);
THCTensor_(maskedCopy)(state, tensor, maskCuda, src);
THCudaByteTensor_free(state, maskCuda);
}
THC_API void
THCTensor_(maskedSelect)(THCState* state,
THCTensor* tensor, THCTensor* src, THCudaByteTensor* mask) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, tensor, src, mask));
THArgCheck(THCudaByteTensor_nElement(state, mask) ==
THCTensor_(nElement)(state, src),
2, "sizes do not match");
// Determine our output size
ptrdiff_t totalElements = THCudaByteTensor_sumall(state, mask);
THCTensor* tensorContig = THCTensor_(newContiguous)(state, tensor);
THCTensor_(resize1d)(state, tensorContig, totalElements);
if (tensor != tensorContig) {
THCTensor_(resize1d)(state, tensor, totalElements);
}
// FIXME: there appears to be a bug in Thrust (CUDA 7.0) for mixed
// iterator prefix sums? Convert `mask` to the same datatype as what
// we're accumulating the prefix sum in (int64_t) to get around it
THCudaLongTensor* maskLong = THCudaLongTensor_new(state);
THLongStorage* maskSizes = THCudaByteTensor_newSizeOf(state, mask);
THCudaLongTensor_resize(state, maskLong, maskSizes, NULL);
THCudaLongTensor_copyCudaByte(state, maskLong, mask);
// Use a prefix sum to determine the output locations of the masked elements
THCudaLongTensor* maskPrefixSum = THCudaLongTensor_new(state);
THCudaLongTensor_resize(state, maskPrefixSum, maskSizes, NULL);
THLongStorage_free(maskSizes);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<int64_t>
maskData(THCudaLongTensor_data(state, maskLong));
thrust::device_ptr<int64_t>
maskPrefixSumData(THCudaLongTensor_data(state, maskPrefixSum));
thrust::exclusive_scan(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
maskData,
maskData + THCudaLongTensor_nElement(state, maskLong),
maskPrefixSumData);
// Then copy over the masked elements at their desired output index
bool status = THC_pointwiseApply3(
state, mask, maskPrefixSum,
src, TensorMaskedSelectOp<real, unsigned char, int64_t>(
THCTensor_(data)(state, tensor)));
THCudaLongTensor_free(state, maskLong);
THCudaLongTensor_free(state, maskPrefixSum);
if (tensor != tensorContig) {
THCTensor_(freeCopyTo)(state, tensorContig, tensor);
} else {
THCTensor_(free)(state, tensorContig);
}
THArgCheck(status, 2, CUTORCH_DIM_WARNING);
THCudaCheck(cudaGetLastError());
}
// FIXME: remove now that we have THCudaByteTensor?
THC_API void
THCTensor_(maskedSelectByte)(THCState* state,
THCTensor *tensor, THCTensor *src, THByteTensor *mask)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
THLongStorage* maskSizes = THByteTensor_newSizeOf(mask);
THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, maskSizes, NULL);
THLongStorage_free(maskSizes);
THCudaByteTensor_copyByte(state, maskCuda, mask);
THCTensor_(maskedSelect)(state, tensor, src, maskCuda);
THCudaByteTensor_free(state, maskCuda);
}
#endif
|
9af3d0899f22a69ac754a2e785db8c8072d4db26.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Simple CUDA example by Ingemar Ragnemalm 2009. Simplest possible?
// Takes the square root of every element in an array.
// nvcc simple.cu -L /usr/local/cuda/lib -lcudart -o simple
#include <stdio.h>
#include <math.h>
const int N = 16;
const int blocksize = 16;
__global__
void simple(float *c)
{
c[threadIdx.x] = sqrt(c[threadIdx.x]);
}
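// One thread per element: with N == blocksize == 16 a single 16-thread block covers the array.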
int main()
{
float *original = new float[N];
float *target = new float[N];
float *cd;
const int size = N*sizeof(float);
for (int i = 0; i < N; ++i)
{
original[i] = i + 1;
}
hipMalloc( (void**)&cd, size );
hipMemcpy( cd, original, size, hipMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hipLaunchKernelGGL(( simple), dim3(dimGrid), dim3(dimBlock), 0, 0, cd);
hipDeviceSynchronize();
hipMemcpy( target, cd, size, hipMemcpyDeviceToHost );
hipFree( cd );
for (int i = 0; i < N; i++)
{
printf("%f ", sqrt(original[i]));
}
printf("\n");
for (int i = 0; i < N; i++)
{
printf("%f ", target[i]);
}
printf("\n");
delete[] original;
delete[] target;
printf("done\n");
return EXIT_SUCCESS;
}
|
9af3d0899f22a69ac754a2e785db8c8072d4db26.cu
|
// Simple CUDA example by Ingemar Ragnemalm 2009. Simplest possible?
// Takes the square root of every element in an array.
// nvcc simple.cu -L /usr/local/cuda/lib -lcudart -o simple
#include <stdio.h>
#include <math.h>
const int N = 16;
const int blocksize = 16;
__global__
void simple(float *c)
{
c[threadIdx.x] = sqrt(c[threadIdx.x]);
}
int main()
{
float *original = new float[N];
float *target = new float[N];
float *cd;
const int size = N*sizeof(float);
for (int i = 0; i < N; ++i)
{
original[i] = i + 1;
}
cudaMalloc( (void**)&cd, size );
cudaMemcpy( cd, original, size, cudaMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
simple<<<dimGrid, dimBlock>>>(cd);
cudaDeviceSynchronize();
cudaMemcpy( target, cd, size, cudaMemcpyDeviceToHost );
cudaFree( cd );
for (int i = 0; i < N; i++)
{
printf("%f ", sqrt(original[i]));
}
printf("\n");
for (int i = 0; i < N; i++)
{
printf("%f ", target[i]);
}
printf("\n");
delete[] original;
delete[] target;
printf("done\n");
return EXIT_SUCCESS;
}
|
ac3d76415a7a44be0cdecb510f5c37db21be5f5e.hip
|
// !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
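// Aside (not generated code): the "Elements / Thread", "Threads / Warp" and
// "Warps / Block" comments above fully determine the GemmShape values used in
// each benchmark. A warp's output tile is elements-per-thread times
// threads-per-warp, and the threadblock tile is the warp tile times
// warps-per-block. Below is a minimal compile-time check of that arithmetic
// for the first SM50 configuration (2x4 elements, 4x8 threads, 1x1 warps);
// the namespace and constant names are hypothetical, used only for this aside.
namespace srgemm_tile_sketch {
constexpr int kElemM = 2, kElemN = 4;  // elements per thread (M x N)
constexpr int kThrM  = 4, kThrN  = 8;  // threads per warp (M x N)
constexpr int kWarpM = 1, kWarpN = 1;  // warps per threadblock (M x N)
static_assert(kElemM * kThrM == 8,           "WarpShape M = 8");
static_assert(kElemN * kThrN == 32,          "WarpShape N = 32");
static_assert(kWarpM * kElemM * kThrM == 8,  "ThreadblockShape M = 8");
static_assert(kWarpN * kElemN * kThrN == 32, "ThreadblockShape N = 32");
} // namespace srgemm_tile_sketch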
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
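// Aside (not generated code): each benchmark reports Flop/s by charging the
// classic GEMM operation count, one multiply plus one add per inner-product
// term, i.e. 2*N^3 operations for an N x N x N problem (for semirings other
// than (+,*) this is an operation count rather than literal floating-point
// flops). kIsIterationInvariantRate then multiplies this per-iteration
// constant by the iteration count and divides by elapsed time. A small
// compile-time sanity check of the 2*N^3 formula, with a hypothetical
// namespace name used only for this aside:
namespace srgemm_flop_sketch {
constexpr double gemm_ops(double n) { return 2.0 * n * n * n; }
static_assert(gemm_ops(256.0) == 33554432.0, "2 * 256^3 operations");
} // namespace srgemm_flop_sketch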
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
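////////////////////////////////////////////////////////////////////////////////
// NOTE (added, illustrative): the BENCHMARK(...) registrations above only
// register these kernels with Google Benchmark; a driver main() is still
// needed to run them. The sketch below is a minimal, hedged example of such a
// driver, assuming the build does not already link Google Benchmark's
// benchmark_main target (or expand BENCHMARK_MAIN() elsewhere) -- if it does,
// this is unnecessary. It is guarded with #if 0 so it never conflicts with the
// real harness. The binary name below is illustrative only; individual tile
// configurations can then be selected at run time, e.g.
//   ./cuasr_bench --benchmark_filter='dsrgemm_nn_t_64x64x8_32x32x1'
#if 0
#include "benchmark/benchmark.h"
int main(int argc, char **argv) {
  ::benchmark::Initialize(&argc, argv);                        // parse --benchmark_* flags
  if (::benchmark::ReportUnrecognizedArguments(argc, argv)) {  // reject unknown flags
    return 1;
  }
  ::benchmark::RunSpecifiedBenchmarks();                       // run all registered BM_* above
  return 0;
}
#endif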
|
ac3d76415a7a44be0cdecb510f5c37db21be5f5e.cu
|
/***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_plus_multiplies_dsrgemm_nn_t_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_plus_multiplies_dsrgemm_nn_t_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
|
58d31df5273e6c76873dcdc9a1197fd33d646e3f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
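// Put more compactly (this is exactly what the gaussian_blur kernel below computes,
// with reads clamped at the image border):
//
// out(r, c) = sum over dy, dx in [-filterWidth/2, filterWidth/2] of
// filter[(dy + filterWidth/2) * filterWidth + (dx + filterWidth/2)]
// * in[clamp(r + dy, 0, numRows - 1) * numCols + clamp(c + dx, 0, numCols - 1)]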
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
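//
// For orientation only: the checkCudaErrors helper lives in utils.h; a typical minimal
// definition looks roughly like the sketch below (illustrative, not necessarily the exact
// code shipped with this assignment):
//
// #define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__)
// template <typename T>
// void check(T err, const char* const func, const char* const file, const int line) {
// if (err != hipSuccess) {
// std::cerr << "HIP error at: " << file << ":" << line << " "
// << hipGetErrorString(err) << " " << func << std::endl;
// exit(1);
// }
// }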
//****************************************************************************
#include "utils.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
extern __shared__ float s_filter[];
int fidx = threadIdx.x + threadIdx.y * blockDim.x;
if (fidx < filterWidth * filterWidth) {
s_filter[fidx] = filter[fidx];
}
__syncthreads();
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < numCols && y < numRows) {
int idx = y * numCols + x;
float result = 0.f;
//For every value in the filter around the pixel (c, r)
for (int filter_y = -filterWidth/2; filter_y <= filterWidth/2; ++filter_y) {
for (int filter_x = -filterWidth/2; filter_x <= filterWidth/2; ++filter_x) {
//Find the global image position for this filter position
//clamp to boundary of the image
int image_y = min(max(y + filter_y, 0), static_cast<int>(numRows - 1));
int image_x = min(max(x + filter_x, 0), static_cast<int>(numCols - 1));
float image_value = static_cast<float>(inputChannel[image_y * numCols + image_x]);
float filter_value = s_filter[(filter_y + filterWidth/2) * filterWidth + filter_x + filterWidth/2];
result += image_value * filter_value;
}
}
outputChannel[idx] = static_cast<unsigned char>(result);
}
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < numCols && y < numRows) {
int idx = y * numCols + x;
redChannel[idx] = inputImageRGBA[idx].x;
greenChannel[idx] = inputImageRGBA[idx].y;
blueChannel[idx] = inputImageRGBA[idx].z;
}
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
int dim = 32;
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(dim, dim, 1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize( (numCols + dim - 1)/dim, (numRows + dim - 1)/dim, 1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), sizeof(float) * filterWidth * filterWidth, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), sizeof(float) * filterWidth * filterWidth, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), sizeof(float) * filterWidth * filterWidth, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_filter));
}
|
58d31df5273e6c76873dcdc9a1197fd33d646e3f.cu
|
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
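// Put more compactly (this is exactly what the gaussian_blur kernel below computes,
// with reads clamped at the image border):
//
// out(r, c) = sum over dy, dx in [-filterWidth/2, filterWidth/2] of
// filter[(dy + filterWidth/2) * filterWidth + (dx + filterWidth/2)]
// * in[clamp(r + dy, 0, numRows - 1) * numCols + clamp(c + dx, 0, numCols - 1)]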
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
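//
// For orientation only: the checkCudaErrors helper lives in utils.h; a typical minimal
// definition looks roughly like the sketch below (illustrative, not necessarily the exact
// code shipped with this assignment):
//
// #define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__)
// template <typename T>
// void check(T err, const char* const func, const char* const file, const int line) {
// if (err != cudaSuccess) {
// std::cerr << "CUDA error at: " << file << ":" << line << " "
// << cudaGetErrorString(err) << " " << func << std::endl;
// exit(1);
// }
// }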
//****************************************************************************
#include "utils.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
extern __shared__ float s_filter[];
int fidx = threadIdx.x + threadIdx.y * blockDim.x;
if (fidx < filterWidth * filterWidth) {
s_filter[fidx] = filter[fidx];
}
__syncthreads();
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < numCols && y < numRows) {
int idx = y * numCols + x;
float result = 0.f;
//For every value in the filter around the pixel (c, r)
for (int filter_y = -filterWidth/2; filter_y <= filterWidth/2; ++filter_y) {
for (int filter_x = -filterWidth/2; filter_x <= filterWidth/2; ++filter_x) {
//Find the global image position for this filter position
//clamp to boundary of the image
int image_y = min(max(y + filter_y, 0), static_cast<int>(numRows - 1));
int image_x = min(max(x + filter_x, 0), static_cast<int>(numCols - 1));
float image_value = static_cast<float>(inputChannel[image_y * numCols + image_x]);
float filter_value = s_filter[(filter_y + filterWidth/2) * filterWidth + filter_x + filterWidth/2];
result += image_value * filter_value;
}
}
outputChannel[idx] = static_cast<unsigned char>(result);
}
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < numCols && y < numRows) {
int idx = y * numCols + x;
redChannel[idx] = inputImageRGBA[idx].x;
greenChannel[idx] = inputImageRGBA[idx].y;
blueChannel[idx] = inputImageRGBA[idx].z;
}
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
int dim = 32;
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(dim, dim, 1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize( (numCols + dim - 1)/dim, (numRows + dim - 1)/dim, 1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize, sizeof(float) * filterWidth * filterWidth>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize, sizeof(float) * filterWidth * filterWidth>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize, sizeof(float) * filterWidth * filterWidth>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_filter));
}
|
897f7c7b8117f64ff541d0bbed5265b031b5342b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "library.h"
#include <cmath>
#include <stdio.h>
#include <string>
using namespace std;
//Uncomment to TEST
//#define TEST_CTF
//#define TEST_PYRAMID
void coarseToFine(cv::Mat f, int heightK, int widthK, ParamGammaCor blind_params, Param params, int w, int h, int nc)
{
// printf("\nInside coarseToFine\n");
//****PADDING
//**Required dimensions of output image - Padded
int top = ::floor(heightK/2);
int bottom = ::floor(heightK/2);
int left = ::floor(widthK/2);
int right = ::floor(widthK/2);
//**Define: Output image - Padded
cv::Mat u(f.rows+top+bottom, f.cols+left+right, f.type());
cv::copyMakeBorder( f, u, top, bottom, left, right, cv::BORDER_REPLICATE, 0 );
// cout << "Padded image dimensions: w * h: " << u.size().width << " x " << u.size().height;
// cout<< " and No. of Channels " <<u.channels()<<endl;
//**Define: Kernel
float kValue = 1.0/(heightK * widthK);
// float kValue = 1.0;
cv::Mat k(heightK, widthK, CV_32FC1);
for (int y = 0; y<k.rows; y++)
{
for(int x = 0; x<(k.cols); x++)
{
k.at<float>(y, x) = kValue;
}
}
// cout<<k<<endl;
#ifdef TEST_PYRAMID
buildPyramid(heightK, widthK, params.finalLambda, params.lambdaMultiplier, w, h, nc, params.kernelSizeMultiplier, params.maxLambda);
#else
ReturnBuildPyramid rbp = buildPyramid(heightK, widthK, params.finalLambda, params.lambdaMultiplier, w, h, nc, params.kernelSizeMultiplier, params.maxLambda);
#endif
// cout<<"Back to ctf"<<endl;
//**Multiscale Processing
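// Working from the coarsest scale (scale == scales) down to the finest (scale == 1):
// resize the blurred input f and the current estimates u and k to this scale,
// re-estimate the kernel with blind(), deconvolve with dec(), and carry the refined
// u and k on to the next finer scale.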
int scales = rbp.scales;
int hs, ws; //Ms = hs
int hKs,wKs; //Mks = hKs
int hu, wu, scale; //Mu = hu; wu = wu
float lambda;
//For Output image
string a, c, d, eK, fK, gK, aI, cI, dI;
char b;
aI = "Scaled Input";
a = "Deblurred Output";
eK = "Kernel";
cv::Mat uCloned2;
cv::Mat kCloned2;
Timer mytimer;
for (scale=scales; scale>= 1; scale--)
{
mytimer.start();
//**Required dimensions of image and kernel (from Pyramid function)
hs = rbp.hp[scale]; //Ms = hs
ws = rbp.wp[scale]; //Ns = ws
hKs = rbp.hKp[scale];
wKs = rbp.wKp[scale];
lambda = rbp.lambdas[scale];
//**Defining Image from (pyramid scheme)
cv::Mat fs(hs, ws, f.type());
cv::Mat fCloned = f.clone();
cv::resize(fCloned, fs, cv::Size(ws, hs), 0, 0, CV_INTER_CUBIC);
//**Required dimensions of u
hu = (hs + hKs - 1);
wu = (ws + wKs - 1);
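// hs + hKs - 1 (and likewise for the width) is the support of the full convolution of an
// hs x ws image with an hKs x wKs kernel, i.e. the image padded by floor(kernel/2) on each
// side, consistent with the border replication applied to u at the start.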
//**Resizing u and k
cv::Mat uDest;
cv::Mat kDest;
cv::resize(u, uDest, cv::Size(wu, hu), 0, 0, CV_INTER_CUBIC);
cv::resize(k, kDest, cv::Size(wKs, hKs), 0, 0, CV_INTER_CUBIC);
//**Set negative values to 0 and normalize
//cout<<kDest<<endl;
kDest=KernelProjection(kDest).clone();
//cout<<kDest<<endl;
/*float sum = 0;
for (int y = 0; y<kDest.rows; y++)
{
for(int x = 0; x<kDest.cols; x++)
{
if (kDest.at<float>(y,x)<0)
{
kDest.at<float>(y,x) = 0;
}
sum = sum + kDest.at<float>(y,x);
}
}
for (int y = 0; y<kDest.rows; y++)
{
for(int x = 0; x<kDest.cols; x++)
{
kDest.at<float>(y,x) = kDest.at<float>(y,x) / sum;
}
}*/
cv::Mat fsOut = fs.clone();
cout<<"Scaled Number: "<<scale<<endl<<endl<<endl<<endl<<endl<<endl;
cout<<"Scaled Input size: "<<fsOut.rows<<" x "<<fsOut.cols<<endl<<endl<<endl<<endl<<endl<<endl;
cout<<"k Before Blind size: "<<kDest.rows<<" x "<<kDest.cols<<endl<<endl<<endl<<endl<<endl<<endl;
//**blind function
cv::Mat fsCloned = fs.clone();
cv::Mat uDestCloned = uDest.clone();
BlindReturn rb = blind(fsCloned, lambda, uDestCloned, kDest, scale, 1000);
k = rb.k.clone();
cout<<"kernel after Blind size: "<<k.rows<<" x "<<k.cols<<endl<<endl<<endl<<endl<<endl<<endl;
//**dec function
cout<<"u sent to dec size: "<<uDestCloned.rows<<" x "<<uDestCloned.cols<<endl<<endl<<endl<<endl<<endl<<endl;
kCloned2 = k.clone();
u = dec(fsCloned, lambda, uDestCloned, kCloned2, scale, 1000);
cout<<"Image deblurred size: "<<u.rows<<" x "<<u.cols<<endl<<endl<<endl<<endl<<endl<<endl;
uCloned2 = u.clone();
mytimer.end();
cout<<"Time for scale number"<<scale<<" : "<<mytimer.get()<<"s"<<endl;
//**SHOW IMAGE
int xImage = 40*scale;
int yImage = 40;
// Printing Scaled Input
showImage("fs", fsOut, 40, 40);
b = static_cast<char> (scale+48);
cI = aI + b;
showImage(cI, fsOut, xImage, yImage);
dI = cI + ".png";
cv::imwrite(dI, fsOut*255.f);
// Printing Kernel
double minVal,maxVal;
cv::minMaxLoc(kCloned2,&minVal,&maxVal);
cv::Mat KOut=kCloned2/maxVal;
fK = eK + b;
showImage(fK, KOut, xImage, yImage);
gK = fK + ".png";
cv::imwrite(gK, KOut*255.f);
// Printing Sharpened Image
c = a + b;
cv::Mat uOut = u.clone();
showImage(c, uOut, xImage, yImage);
d = c + ".png";
cv::imwrite(d, uOut*255.f);
}
// cout<<endl<<"Exiting coarseToFine"<<endl;
double minVal,maxVal;
cv::minMaxLoc(kCloned2,&minVal,&maxVal);
cv::Mat KOut=kCloned2/maxVal;
showImage("Final Kernel", KOut, 40, 40);
cv::imwrite("FinalKernel.png", KOut*255.f);
showImage("Deblurred Image", uCloned2, 240,40);
cv::imwrite("DeblurredImage.png", uCloned2*255.f);
return;
}
|
897f7c7b8117f64ff541d0bbed5265b031b5342b.cu
|
#include "library.h"
#include <cmath>
#include <stdio.h>
#include <string>
using namespace std;
//Uncomment to TEST
//#define TEST_CTF
//#define TEST_PYRAMID
void coarseToFine(cv::Mat f, int heightK, int widthK, ParamGammaCor blind_params, Param params, int w, int h, int nc)
{
// printf("\nInside coarseToFine\n");
//****PADDING
//**Required dimensions of output image - Padded
int top = std::floor(heightK/2);
int bottom = std::floor(heightK/2);
int left = std::floor(widthK/2);
int right = std::floor(widthK/2);
//**Define: Output image - Padded
cv::Mat u(f.rows+top+bottom, f.cols+left+right, f.type());
cv::copyMakeBorder( f, u, top, bottom, left, right, cv::BORDER_REPLICATE, 0 );
// cout << "Padded image dimensions: w * h: " << u.size().width << " x " << u.size().height;
// cout<< " and No. of Channels " <<u.channels()<<endl;
//**Define: Kernel
float kValue = 1.0/(heightK * widthK);
// float kValue = 1.0;
cv::Mat k(heightK, widthK, CV_32FC1);
for (int y = 0; y<k.rows; y++)
{
for(int x = 0; x<(k.cols); x++)
{
k.at<float>(y, x) = kValue;
}
}
// cout<<k<<endl;
#ifdef TEST_PYRAMID
buildPyramid(heightK, widthK, params.finalLambda, params.lambdaMultiplier, w, h, nc, params.kernelSizeMultiplier, params.maxLambda);
#else
ReturnBuildPyramid rbp = buildPyramid(heightK, widthK, params.finalLambda, params.lambdaMultiplier, w, h, nc, params.kernelSizeMultiplier, params.maxLambda);
#endif
// cout<<"Back to ctf"<<endl;
//**Multiscale Processing
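// Working from the coarsest scale (scale == scales) down to the finest (scale == 1):
// resize the blurred input f and the current estimates u and k to this scale,
// re-estimate the kernel with blind(), deconvolve with dec(), and carry the refined
// u and k on to the next finer scale.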
int scales = rbp.scales;
int hs, ws; //Ms = hs
int hKs,wKs; //Mks = hKs
int hu, wu, scale; //Mu = hu; wu = wu
float lambda;
//For Output image
string a, c, d, eK, fK, gK, aI, cI, dI;
char b;
aI = "Scaled Input";
a = "Deblurred Output";
eK = "Kernel";
cv::Mat uCloned2;
cv::Mat kCloned2;
Timer mytimer;
for (scale=scales; scale>= 1; scale--)
{
mytimer.start();
//**Required dimensions of image and kernel (from Pyramid function)
hs = rbp.hp[scale]; //Ms = hs
ws = rbp.wp[scale]; //Ns = ws
hKs = rbp.hKp[scale];
wKs = rbp.wKp[scale];
lambda = rbp.lambdas[scale];
//**Defining Image from (pyramid scheme)
cv::Mat fs(hs, ws, f.type());
cv::Mat fCloned = f.clone();
cv::resize(fCloned, fs, cv::Size(ws, hs), 0, 0, CV_INTER_CUBIC);
//**Required dimensions of u
hu = (hs + hKs - 1);
wu = (ws + wKs - 1);
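// hs + hKs - 1 (and likewise for the width) is the support of the full convolution of an
// hs x ws image with an hKs x wKs kernel, i.e. the image padded by floor(kernel/2) on each
// side, consistent with the border replication applied to u at the start.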
//**Resizing u and k
cv::Mat uDest;
cv::Mat kDest;
cv::resize(u, uDest, cv::Size(wu, hu), 0, 0, CV_INTER_CUBIC);
cv::resize(k, kDest, cv::Size(wKs, hKs), 0, 0, CV_INTER_CUBIC);
//**Set negative values to 0 and normalize
//cout<<kDest<<endl;
kDest=KernelProjection(kDest).clone();
//cout<<kDest<<endl;
/*float sum = 0;
for (int y = 0; y<kDest.rows; y++)
{
for(int x = 0; x<kDest.cols; x++)
{
if (kDest.at<float>(y,x)<0)
{
kDest.at<float>(y,x) = 0;
}
sum = sum + kDest.at<float>(y,x);
}
}
for (int y = 0; y<kDest.rows; y++)
{
for(int x = 0; x<kDest.cols; x++)
{
kDest.at<float>(y,x) = kDest.at<float>(y,x) / sum;
}
}*/
cv::Mat fsOut = fs.clone();
cout<<"Scaled Number: "<<scale<<endl<<endl<<endl<<endl<<endl<<endl;
cout<<"Scaled Input size: "<<fsOut.rows<<" x "<<fsOut.cols<<endl<<endl<<endl<<endl<<endl<<endl;
cout<<"k Before Blind size: "<<kDest.rows<<" x "<<kDest.cols<<endl<<endl<<endl<<endl<<endl<<endl;
//**blind function
cv::Mat fsCloned = fs.clone();
cv::Mat uDestCloned = uDest.clone();
BlindReturn rb = blind(fsCloned, lambda, uDestCloned, kDest, scale, 1000);
k = rb.k.clone();
cout<<"kernel after Blind size: "<<k.rows<<" x "<<k.cols<<endl<<endl<<endl<<endl<<endl<<endl;
//**dec function
cout<<"u sent to dec size: "<<uDestCloned.rows<<" x "<<uDestCloned.cols<<endl<<endl<<endl<<endl<<endl<<endl;
kCloned2 = k.clone();
u = dec(fsCloned, lambda, uDestCloned, kCloned2, scale, 1000);
cout<<"Image deblurred size: "<<u.rows<<" x "<<u.cols<<endl<<endl<<endl<<endl<<endl<<endl;
uCloned2 = u.clone();
mytimer.end();
cout<<"Time for scale number"<<scale<<" : "<<mytimer.get()<<"s"<<endl;
//**SHOW IMAGE
int xImage = 40*scale;
int yImage = 40;
// Printing Scaled Input
showImage("fs", fsOut, 40, 40);
b = static_cast<char> (scale+48);
cI = aI + b;
showImage(cI, fsOut, xImage, yImage);
dI = cI + ".png";
cv::imwrite(dI, fsOut*255.f);
// Printing Kernel
double minVal,maxVal;
cv::minMaxLoc(kCloned2,&minVal,&maxVal);
cv::Mat KOut=kCloned2/maxVal;
fK = eK + b;
showImage(fK, KOut, xImage, yImage);
gK = fK + ".png";
cv::imwrite(gK, KOut*255.f);
// Printing Sharpened Image
c = a + b;
cv::Mat uOut = u.clone();
showImage(c, uOut, xImage, yImage);
d = c + ".png";
cv::imwrite(d, uOut*255.f);
}
// cout<<endl<<"Exiting coarseToFine"<<endl;
double minVal,maxVal;
cv::minMaxLoc(kCloned2,&minVal,&maxVal);
cv::Mat KOut=kCloned2/maxVal;
showImage("Final Kernel", KOut, 40, 40);
cv::imwrite("FinalKernel.png", KOut*255.f);
showImage("Deblurred Image", uCloned2, 240,40);
cv::imwrite("DeblurredImage.png", uCloned2*255.f);
return;
}
|
91ad0ec6d32d417428800e06f28dcd6546aae47f.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright 2020, Tencent Inc.
// All rights reserved.
//
// @author shaorunwang <[email protected]>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <thrust/extrema.h>
#include <fstream>
#include <iostream>
//#include "NvInfer.h"
//#include "sparse_fwffm_plugin.h"
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
std::cout << hipGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m, n) (((m) / (n)) + ((m) % (n) > 0))
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int32_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
namespace nvinfer1 {
namespace sparse_fwffm {
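// ComputeBatchBoundary: index_tensor holds, for every feature row, the sample id it belongs
// to, sorted in ascending order. The kernel fills sample_feature_start_addr so that sample
// i's features occupy [sample_feature_start_addr[i], sample_feature_start_addr[i + 1]);
// samples with no features end up with an empty range. sample_feature_end_addr is only
// needed by the commented-out atomicMin/atomicMax variant.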
__global__ void ComputeBatchBoundary(const int32_t* index_tensor, int32_t total_feature_num,
int32_t batch_size, int32_t* sample_feature_start_addr,
int32_t* sample_feature_end_addr) {
int32_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < total_feature_num) {
int32_t idx = index_tensor[tid];
// atomicMin(sample_feature_start_addr + idx, tid);
// atomicMax(sample_feature_end_addr + idx, tid + 1);
if (tid > 0) {
int32_t pre_idx = index_tensor[tid - 1];
for (int32_t i = idx; i > pre_idx; --i) {
sample_feature_start_addr[i] = tid;
}
} else {
int32_t first_idx = index_tensor[0];
for (int32_t i = 0; i <= first_idx; ++i) {
sample_feature_start_addr[i] = 0;
}
int32_t last_idx = index_tensor[total_feature_num - 1];
for (int32_t i = batch_size - 1; i > last_idx; --i) {
sample_feature_start_addr[i] = total_feature_num;
}
sample_feature_start_addr[batch_size] = total_feature_num;
}
}
}
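// Accumulates the shared ("common") feature rows (those before sample_feature_start_addr[0])
// once for the whole batch: per-(field, fw_field) embedding sums go to
// gmem_fw_cross_mean_sum, per-fw_field sums of squares go to gmem_fw_cross_square_sum,
// and gmem_fw_field_map records which field each fw_field was seen with. One warp
// handles one feature row; ids in field_tensor are treated as 1-based and out-of-range
// entries are skipped.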
template <typename T = float, int32_t warp_num = 32>
__global__ void ProcessCommonPart(int32_t embedding_size, int32_t field_num, int32_t fw_field_num,
int32_t* sample_feature_start_addr, const T* weight_tensor,
const int32_t* field_tensor, T* gmem_fw_cross_mean_sum,
T* gmem_fw_cross_square_sum, int32_t* gmem_fw_field_map) {
int32_t warp_id = threadIdx.y;
int32_t lane_id = threadIdx.x;
int32_t global_warp_id = blockIdx.x * warp_num + warp_id;
int32_t total_global_warp_num = gridDim.x * warp_num;
int32_t tid = threadIdx.x + threadIdx.y * blockDim.x;
int32_t total_thread = warp_num * 32;
int32_t common_feature_num = sample_feature_start_addr[0];
for (int32_t wid = global_warp_id; wid < common_feature_num; wid += total_global_warp_num) {
int32_t field_1 = field_tensor[wid * 2] - 1;
int32_t fw_field_1 = field_tensor[wid * 2 + 1] - 1;
if (fw_field_1 < 0 || fw_field_1 >= fw_field_num || field_1 < 0 || field_1 >= field_num)
continue;
if (lane_id == 0) gmem_fw_field_map[fw_field_1] = field_1;
#pragma unroll
for (int32_t field_2 = 0; field_2 < field_num; field_2++) {
int32_t mem_field_offset = (field_2 * fw_field_num + fw_field_1) * embedding_size;
#pragma unroll
for (int32_t n = 0; n < (embedding_size + 31) / 32 * 32; n += 32) {
if (n + lane_id < embedding_size) {
T reg = weight_tensor[wid * embedding_size * field_num +
field_2 * embedding_size + n + lane_id];
atomicAdd(gmem_fw_cross_mean_sum + mem_field_offset + n + lane_id, reg);
}
}
}
int32_t mem_field_offset = fw_field_1 * embedding_size;
#pragma unroll
for (int32_t n = 0; n < (embedding_size + 31) / 32 * 32; n += 32) {
if (n + lane_id < embedding_size) {
T reg = weight_tensor[wid * embedding_size * field_num + field_1 * embedding_size +
n + lane_id];
float square = reg * reg;
atomicAdd(gmem_fw_cross_square_sum + mem_field_offset + n + lane_id, square);
}
}
}
}
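// Copies the common-part sums into every sample's slice of the per-batch workspace so
// the per-sample kernels can accumulate on top of them. One block handles one
// (sample, fw_field) pair and blockDim.x is expected to equal embedding_size. Only the
// first two field slices of the mean sums are copied, which appears to assume
// field_num == 2.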
template <typename T = float>
__global__ void BroadcastCommonPart(int32_t batch, int32_t embedding_size, int32_t field_num,
int32_t fw_field_num, T* gmem_fw_cross_mean_sum,
T* gmem_fw_cross_square_sum, T* output) {
int32_t lane_id = threadIdx.x;
int32_t fw_field_id = blockIdx.x % fw_field_num;
int32_t tid = lane_id + fw_field_id * embedding_size;
int32_t bid = blockIdx.x / fw_field_num;
// printf("bid : %d, tid: %d\n",bid, tid);
T Reg_square = gmem_fw_cross_square_sum[tid];
output[bid * (embedding_size * (field_num + 1) * fw_field_num) +
embedding_size * field_num * fw_field_num + tid] = Reg_square;
T Reg_mean_0 = gmem_fw_cross_mean_sum[tid];
T Reg_mean_1 = gmem_fw_cross_mean_sum[embedding_size * fw_field_num + tid];
output[bid * (embedding_size * (field_num + 1) * fw_field_num) + tid] = Reg_mean_0;
output[bid * (embedding_size * (field_num + 1) * fw_field_num) + embedding_size * fw_field_num +
tid] = Reg_mean_1;
}
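// Per-sample accumulation into the global workspace: loads the common fw_field -> field
// map into shared memory, replays sample 0's map for every block other than block 0,
// adds this sample's feature rows into the cross mean/square sums, and finally compacts
// the fw_fields that are actually present into smem_fw_map_idx (their count ends up at
// smem_fw_map_idx[fw_field_num]).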
template <typename T = float, int32_t warp_num = 32>
__device__ void ProcessSamplePart(
int32_t* smem_fw_field_map, int32_t* smem_fw_map_idx, T* gmem_cross_mean_sum,
T* gmem_cross_square_sum, T* gmem_fw_cross_mean_sum, T* gmem_fw_cross_square_sum,
int32_t* gmem_fw_field_map, int32_t embedding_size, int32_t field_num, int32_t fw_field_num,
int32_t this_sample_feature_num, int32_t this_sample_feature_start_addr,
int32_t sample_0_feature_num, int32_t sample_0_feature_start_addr, const T* weight_tensor,
const int* field_tensor, int32_t shared_mem_elements) {
constexpr int32_t total_thread = warp_num * 32;
int32_t warp_id = threadIdx.y;
int32_t lane_id = threadIdx.x;
int32_t tid = threadIdx.x + threadIdx.y * 32;
for (int32_t i = 0; i < fw_field_num; i += warp_num * 32) {
if (i + tid < fw_field_num) {
smem_fw_field_map[i + tid] = gmem_fw_field_map[i + tid];
}
}
for (int32_t i = fw_field_num; i < fw_field_num * 2; i += warp_num * 32) {
if (i + tid < fw_field_num * 2) {
smem_fw_field_map[i + tid] = -1;
}
}
__syncthreads();
// patch
if (blockIdx.x != 0) {
int32_t sample_0_start_row = warp_id + sample_0_feature_start_addr;
int32_t sample_0_end_row = sample_0_feature_num + sample_0_feature_start_addr;
for (int32_t wid = sample_0_start_row; wid < sample_0_end_row; wid += warp_num) {
int32_t field_1 = field_tensor[wid * 2] - 1;
int32_t fw_field_1 = field_tensor[wid * 2 + 1] - 1;
if (fw_field_1 < 0 || fw_field_1 >= fw_field_num || field_1 < 0 || field_1 >= field_num)
continue;
if (lane_id == 0) smem_fw_field_map[fw_field_1] = field_1;
}
}
// sample feature phase
int32_t common_fw_field_map_offset_for_ad = blockIdx.x > 0 ? fw_field_num : 0;
int32_t sample_start_row = warp_id + this_sample_feature_start_addr;
int32_t sample_end_row = this_sample_feature_num + this_sample_feature_start_addr;
for (int32_t wid = sample_start_row; wid < sample_end_row; wid += warp_num) {
int32_t field_1 = field_tensor[wid * 2] - 1;
int32_t fw_field_1 = field_tensor[wid * 2 + 1] - 1;
if (fw_field_1 < 0 || fw_field_1 >= fw_field_num || field_1 < 0 || field_1 >= field_num)
continue;
if (lane_id == 0) {
smem_fw_field_map[fw_field_1 + common_fw_field_map_offset_for_ad] = field_1;
}
#pragma unroll
for (int32_t field_2 = 0; field_2 < field_num; field_2++) {
int32_t mem_field_offset = (field_2 * fw_field_num + fw_field_1) * embedding_size;
#pragma unroll
for (int32_t n = 0; n < (embedding_size + 31) / 32 * 32; n += 32) {
T reg = T(0);
int32_t rd_offset =
wid * embedding_size * field_num + field_2 * embedding_size + n + lane_id;
T* wr_ptr = gmem_cross_mean_sum + mem_field_offset + n + lane_id;
if (n + lane_id < embedding_size) {
reg = weight_tensor[rd_offset];
atomicAdd(wr_ptr, reg);
}
}
}
int32_t mem_field_offset = fw_field_1 * embedding_size;
#pragma unroll
for (int32_t n = 0; n < (embedding_size + 31) / 32 * 32; n += 32) {
T reg = T(0);
T square = T(0);
int32_t rd_offset =
wid * embedding_size * field_num + field_1 * embedding_size + n + lane_id;
T* wr_ptr = gmem_cross_square_sum + mem_field_offset + n + lane_id;
if (n + lane_id < embedding_size) {
reg = weight_tensor[rd_offset];
square = reg * reg;
atomicAdd(wr_ptr, square);
}
}
}
__syncthreads();
for (int32_t i = 0; i < fw_field_num; i += total_thread) {
if (i + tid < fw_field_num) {
int32_t field_1 = smem_fw_field_map[common_fw_field_map_offset_for_ad + i + tid];
int32_t field_1_part1 = smem_fw_field_map[i + tid];
if (field_1 < 0 && field_1_part1 != 1) {
field_1 = field_1_part1;
}
smem_fw_field_map[i + tid] = field_1;
}
}
__syncthreads();
if (warp_id == 0 && lane_id == 0) {
int32_t cnt = 0;
for (int32_t i = 0; i < fw_field_num; i++) {
if (smem_fw_field_map[i] >= 0) {
smem_fw_map_idx[cnt++] = i;
}
}
smem_fw_map_idx[fw_field_num] = cnt;
}
__syncthreads();
}
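// Same logic as ProcessSamplePart, but the cross mean/square sums are staged in shared
// memory (smem_cross_mean_sum / smem_cross_square_sum) rather than the global
// workspace; used when those buffers fit in dynamic shared memory.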
template <typename T = float, int32_t warp_num = 32>
__device__ void ProcessSamplePart_share(
int32_t* smem_fw_field_map, int32_t* smem_fw_map_idx, T* smem_cross_mean_sum,
T* smem_cross_square_sum, T* gmem_fw_cross_mean_sum, T* gmem_fw_cross_square_sum,
int32_t* gmem_fw_field_map, int32_t embedding_size, int32_t field_num, int32_t fw_field_num,
int32_t this_sample_feature_num, int32_t this_sample_feature_start_addr,
int32_t sample_0_feature_num, int32_t sample_0_feature_start_addr, const T* weight_tensor,
const int* field_tensor, int32_t shared_mem_elements) {
constexpr int32_t total_thread = warp_num * 32;
int32_t warp_id = threadIdx.y;
int32_t lane_id = threadIdx.x;
int32_t tid = threadIdx.x + threadIdx.y * 32;
for (int32_t i = 0; i < fw_field_num; i += warp_num * 32) {
if (i + tid < fw_field_num) {
smem_fw_field_map[i + tid] = gmem_fw_field_map[i + tid];
}
}
for (int32_t i = fw_field_num; i < fw_field_num * 2; i += warp_num * 32) {
if (i + tid < fw_field_num * 2) {
smem_fw_field_map[i + tid] = -1;
}
}
for (int32_t i = tid; i < embedding_size * field_num * fw_field_num; i += warp_num * 32) {
smem_cross_mean_sum[i] = gmem_fw_cross_mean_sum[i];
if (i < embedding_size * fw_field_num) {
smem_cross_square_sum[i] = gmem_fw_cross_square_sum[i];
}
}
__syncthreads();
// patch
if (blockIdx.x != 0) {
int32_t sample_0_start_row = warp_id + sample_0_feature_start_addr;
int32_t sample_0_end_row = sample_0_feature_num + sample_0_feature_start_addr;
for (int32_t wid = sample_0_start_row; wid < sample_0_end_row; wid += warp_num) {
int32_t field_1 = field_tensor[wid * 2] - 1;
int32_t fw_field_1 = field_tensor[wid * 2 + 1] - 1;
if (fw_field_1 < 0 || fw_field_1 >= fw_field_num || field_1 < 0 || field_1 >= field_num)
continue;
if (lane_id == 0) smem_fw_field_map[fw_field_1] = field_1;
}
}
// sample feature phase
int32_t common_fw_field_map_offset_for_ad = blockIdx.x > 0 ? fw_field_num : 0;
int32_t sample_start_row = warp_id + this_sample_feature_start_addr;
int32_t sample_end_row = this_sample_feature_num + this_sample_feature_start_addr;
for (int32_t wid = sample_start_row; wid < sample_end_row; wid += warp_num) {
int32_t field_1 = field_tensor[wid * 2] - 1;
int32_t fw_field_1 = field_tensor[wid * 2 + 1] - 1;
if (fw_field_1 < 0 || fw_field_1 >= fw_field_num || field_1 < 0 || field_1 >= field_num)
continue;
if (lane_id == 0) {
smem_fw_field_map[fw_field_1 + common_fw_field_map_offset_for_ad] = field_1;
}
#pragma unroll
for (int32_t field_2 = 0; field_2 < field_num; field_2++) {
int32_t mem_field_offset = (field_2 * fw_field_num + fw_field_1) * embedding_size;
#pragma unroll
for (int32_t n = 0; n < (embedding_size + 31) / 32 * 32; n += 32) {
T reg = T(0);
int32_t rd_offset =
wid * embedding_size * field_num + field_2 * embedding_size + n + lane_id;
T* wr_ptr = smem_cross_mean_sum + mem_field_offset + n + lane_id;
if (n + lane_id < embedding_size) {
reg = weight_tensor[rd_offset];
atomicAdd(wr_ptr, reg);
}
}
}
int32_t mem_field_offset = fw_field_1 * embedding_size;
#pragma unroll
for (int32_t n = 0; n < (embedding_size + 31) / 32 * 32; n += 32) {
T reg = T(0);
T square = T(0);
int32_t rd_offset =
wid * embedding_size * field_num + field_1 * embedding_size + n + lane_id;
T* wr_ptr = smem_cross_square_sum + mem_field_offset + n + lane_id;
if (n + lane_id < embedding_size) {
reg = weight_tensor[rd_offset];
square = reg * reg;
atomicAdd(wr_ptr, square);
}
}
}
__syncthreads();
for (int32_t i = 0; i < fw_field_num; i += total_thread) {
if (i + tid < fw_field_num) {
int32_t field_1 = smem_fw_field_map[common_fw_field_map_offset_for_ad + i + tid];
int32_t field_1_part1 = smem_fw_field_map[i + tid];
if (field_1 < 0 && field_1_part1 != 1) {
field_1 = field_1_part1;
}
smem_fw_field_map[i + tid] = field_1;
}
}
__syncthreads();
if (warp_id == 0 && lane_id == 0) {
int32_t cnt = 0;
for (int32_t i = 0; i < fw_field_num; i++) {
if (smem_fw_field_map[i] >= 0) {
smem_fw_map_idx[cnt++] = i;
}
}
smem_fw_map_idx[fw_field_num] = cnt;
}
__syncthreads();
}
template <typename T = float, int32_t warp_num = 32>
__device__ void ProcessOutput_share(T* smem_cross_mean_sum, T* smem_cross_square_sum,
T* mem_fw_cross_mean_sum, T* mem_fw_cross_square_sum,
int32_t* smem_fw_field_map, int32_t* smem_fw_map_idx,
const T* fw_weight_tensor, T* output_gmem, int32_t batch_id,
int32_t embedding_size, int32_t field_num,
int32_t fw_field_num) {
int32_t warp_id = threadIdx.y;
int32_t lane_id = threadIdx.x;
int32_t weight_size_pad = (embedding_size + 31) / 32 * 32;
int32_t common_fw_field_map_offset_for_ad = blockIdx.x > 0 ? fw_field_num : 0;
// T output_accu[(embedding_size + 31) / 32] = {0};
T output_accu[4] = {0};
    int32_t total_valid_fw_field = smem_fw_map_idx[fw_field_num];
    for (int32_t fw_field_1_idx = warp_id; fw_field_1_idx < total_valid_fw_field;
fw_field_1_idx += warp_num) {
int32_t fw_field_1 = smem_fw_map_idx[fw_field_1_idx];
int32_t fw_iter = (2 + fw_field_1) * (fw_field_1 + 1) / 2 - (fw_field_1 + 1);
int32_t field_1 = smem_fw_field_map[fw_field_1];
for (int32_t fw_field_2_idx = 0; fw_field_2_idx < fw_field_1_idx; fw_field_2_idx++) {
int32_t fw_field_2 = smem_fw_map_idx[fw_field_2_idx];
int32_t field_2 = smem_fw_field_map[fw_field_2];
T fw_weight_reg = fw_weight_tensor[fw_iter + fw_field_2] + T(1);
int32_t index_1 = (field_1 * fw_field_num + fw_field_2) * embedding_size;
int32_t index_2 = (field_2 * fw_field_num + fw_field_1) * embedding_size;
for (int32_t n = 0; n < weight_size_pad; n += 32) {
T mean_index_1_sum = T(0);
T mean_index_2_sum = T(0);
if (n + lane_id < embedding_size) {
mean_index_1_sum = smem_cross_mean_sum[index_1 + n + lane_id];
mean_index_2_sum = smem_cross_mean_sum[index_2 + n + lane_id];
}
output_accu[n / 32] += mean_index_1_sum * mean_index_2_sum * fw_weight_reg;
}
}
T fw_weight_reg = fw_weight_tensor[fw_iter + fw_field_1] + T(1);
int32_t index_1 = (field_1 * fw_field_num + fw_field_1) * embedding_size;
for (int32_t n = 0; n < weight_size_pad; n += 32) {
T cross_mean_sum = T(0);
T cross_square_sum = T(0);
if (n + lane_id < embedding_size) {
cross_mean_sum = smem_cross_mean_sum[index_1 + n + lane_id];
cross_square_sum = smem_cross_square_sum[fw_field_1 * embedding_size + n + lane_id];
}
output_accu[n / 32] +=
T(0.5) * (cross_mean_sum * cross_mean_sum - cross_square_sum) * fw_weight_reg;
}
}
for (int32_t n = 0; n < weight_size_pad; n += 32) {
if (n + lane_id < embedding_size) {
T* Outptr = (output_gmem + batch_id * embedding_size + n + lane_id);
atomicAdd(Outptr, output_accu[n / 32]);
}
}
}
template <typename T = float, int32_t warp_num = 32>
__device__ void ProcessOutput(T* mem_cross_mean_sum, T* mem_cross_square_sum,
T* mem_fw_cross_mean_sum, T* mem_fw_cross_square_sum,
int32_t* smem_fw_field_map, int32_t* smem_fw_map_idx,
const T* fw_weight_tensor, T* output_gmem, int32_t batch_id,
int32_t embedding_size, int32_t field_num, int32_t fw_field_num) {
int32_t warp_id = threadIdx.y;
int32_t lane_id = threadIdx.x;
int32_t weight_size_pad = (embedding_size + 31) / 32 * 32;
int32_t common_fw_field_map_offset_for_ad = blockIdx.x > 0 ? fw_field_num : 0;
// T output_accu[(embedding_size + 31) / 32] = {0};
T output_accu[6] = { 0 };
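    // The fixed-size accumulator keeps one partial sum per 32-lane chunk of the
    // embedding, so this kernel handles embedding_size up to 6 * 32 = 192 (the
    // commented-out declaration above shows the intended general size).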
    int32_t total_valid_fw_field = smem_fw_map_idx[fw_field_num];
    for (int32_t fw_field_1_idx = warp_id; fw_field_1_idx < total_valid_fw_field;
fw_field_1_idx += warp_num) {
int32_t fw_field_1 = smem_fw_map_idx[fw_field_1_idx];
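        // fw_weight_tensor packs the symmetric fw_field x fw_field weights as a lower
        // triangle; (2 + f) * (f + 1) / 2 - (f + 1) simplifies to f * (f + 1) / 2, the
        // offset of row f, so fw_weight_tensor[fw_iter + fw_field_2] is the weight of
        // the (fw_field_1, fw_field_2) pair for fw_field_2 <= fw_field_1.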
int32_t fw_iter = (2 + fw_field_1) * (fw_field_1 + 1) / 2 - (fw_field_1 + 1);
int32_t field_1 = smem_fw_field_map[fw_field_1];
for (int32_t fw_field_2_idx = 0; fw_field_2_idx < fw_field_1_idx; fw_field_2_idx++) {
int32_t fw_field_2 = smem_fw_map_idx[fw_field_2_idx];
int32_t field_2 = smem_fw_field_map[fw_field_2];
T fw_weight_reg = fw_weight_tensor[fw_iter + fw_field_2] + T(1);
int32_t index_1 = (field_1 * fw_field_num + fw_field_2) * embedding_size;
int32_t index_2 = (field_2 * fw_field_num + fw_field_1) * embedding_size;
for (int32_t n = 0; n < weight_size_pad; n += 32) {
T mean_index_1_sum = T(0);
T mean_index_2_sum = T(0);
if (n + lane_id < embedding_size) {
mean_index_1_sum = mem_cross_mean_sum[index_1 + n + lane_id];
mean_index_2_sum = mem_cross_mean_sum[index_2 + n + lane_id];
}
output_accu[n / 32] += mean_index_1_sum * mean_index_2_sum * fw_weight_reg;
}
}
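        // Self interaction of fw_field_1: the sum of pairwise products of distinct
        // features sharing a field equals 0.5 * ((sum of embeddings)^2 - sum of squared
        // embeddings), which is exactly what cross_mean_sum and cross_square_sum hold.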
T fw_weight_reg = fw_weight_tensor[fw_iter + fw_field_1] + T(1);
int32_t index_1 = (field_1 * fw_field_num + fw_field_1) * embedding_size;
for (int32_t n = 0; n < weight_size_pad; n += 32) {
T cross_mean_sum = T(0);
T cross_square_sum = T(0);
if (n + lane_id < embedding_size) {
cross_mean_sum = mem_cross_mean_sum[index_1 + n + lane_id];
cross_square_sum = mem_cross_square_sum[fw_field_1 * embedding_size + n + lane_id];
}
output_accu[n / 32] +=
T(0.5) * (cross_mean_sum * cross_mean_sum - cross_square_sum) * fw_weight_reg;
}
}
for (int32_t n = 0; n < weight_size_pad; n += 32) {
if (n + lane_id < embedding_size) {
T* Outptr = (output_gmem + batch_id * embedding_size + n + lane_id);
atomicAdd(Outptr, output_accu[n / 32]);
}
}
}
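// Top-level kernel, one thread block per sample: runs the per-sample accumulation and
// then the pairwise field-interaction reduction, keeping the large cross-sum buffers in
// the global workspace. ProcessFwffmOutput_share below is the shared-memory variant.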
template <typename T = float, int32_t warp_num = 32>
__global__ void ProcessFwffmOutput(int32_t embedding_size, int32_t field_num, int32_t fw_field_num,
bool fw_weight_multil_flag, int32_t* sample_feature_start_addr,
int32_t* sample_feature_end_addr, const T* weight_tensor,
const int32_t* field_tensor, const T* fw_weight_tensor,
T* output_tensor, T* workspace)
{
int32_t batch_size = gridDim.x;
int32_t warp_id = threadIdx.y;
extern __shared__ float smem_pool[];
int32_t batch_id = blockIdx.x;
int32_t fw_weight_size = (fw_field_num + 1) * fw_field_num / 2;
int32_t* smem_fw_field_map = reinterpret_cast<int32_t*>(smem_pool);
int32_t* smem_fw_map_idx = smem_fw_field_map + 2 * fw_field_num;
    // Use global memory in case atomicAdd on float is not available in shared memory
T* mem_cross_mean_sum =
workspace + batch_id * (embedding_size * (field_num + 1) * fw_field_num);
T* mem_cross_square_sum = mem_cross_mean_sum + embedding_size * field_num * fw_field_num;
T* mem_fw_cross_mean_sum =
workspace + batch_size * (embedding_size * (field_num + 1) * fw_field_num);
T* mem_fw_cross_square_sum = mem_fw_cross_mean_sum + embedding_size * field_num * fw_field_num;
int32_t* mem_fw_field_map =
reinterpret_cast<int*>(mem_fw_cross_square_sum + fw_field_num * embedding_size);
const T* local_fw_weight_data = fw_weight_tensor;
if (fw_weight_multil_flag) {
local_fw_weight_data = fw_weight_tensor + batch_id * fw_weight_size;
}
int32_t this_sample_feature_start_addr = sample_feature_start_addr[batch_id];
int32_t this_sample_feature_end_addr = sample_feature_start_addr[batch_id + 1];
int32_t this_sample_feature_num = this_sample_feature_end_addr - this_sample_feature_start_addr;
int32_t sample_0_feature_start_addr = sample_feature_start_addr[0];
int32_t sample_0_feature_end_addr = sample_feature_start_addr[1];
int32_t sample_0_feature_num = sample_0_feature_end_addr - sample_0_feature_start_addr;
ProcessSamplePart<T, warp_num>(
smem_fw_field_map, smem_fw_map_idx, mem_cross_mean_sum, mem_cross_square_sum,
mem_fw_cross_mean_sum, mem_fw_cross_square_sum, mem_fw_field_map, embedding_size, field_num,
fw_field_num, this_sample_feature_num, this_sample_feature_start_addr, sample_0_feature_num,
sample_0_feature_start_addr, weight_tensor, field_tensor, warp_num * embedding_size);
ProcessOutput<T, warp_num>(mem_cross_mean_sum, mem_cross_square_sum, mem_fw_cross_mean_sum,
mem_fw_cross_square_sum, smem_fw_field_map, smem_fw_map_idx,
local_fw_weight_data, output_tensor, batch_id, embedding_size, field_num,
fw_field_num);
}
template <typename T = float, int32_t warp_num = 32>
__global__ void ProcessFwffmOutput_share(int32_t embedding_size, int32_t field_num,
int32_t fw_field_num, bool fw_weight_multil_flag,
int32_t* sample_feature_start_addr,
int32_t* sample_feature_end_addr, const T* weight_tensor,
const int32_t* field_tensor, const T* fw_weight_tensor,
T* output_tensor, T* workspace) {
int32_t batch_size = gridDim.x;
int32_t warp_id = threadIdx.y;
extern __shared__ float smem_pool[];
int32_t batch_id = blockIdx.x;
int32_t fw_weight_size = (fw_field_num + 1) * fw_field_num / 2;
int32_t* smem_fw_field_map = reinterpret_cast<int32_t*>(smem_pool);
int32_t* smem_fw_map_idx = smem_fw_field_map + 2 * fw_field_num;
    // This shared-memory variant stages the cross sums in shared memory rather than the global workspace
T* smem_cross_mean_sum = reinterpret_cast<T*>(smem_fw_map_idx + fw_field_num + 1);
T* smem_cross_square_sum = smem_cross_mean_sum + embedding_size * field_num * fw_field_num;
T* mem_fw_cross_mean_sum =
workspace + batch_size * (embedding_size * (field_num + 1) * fw_field_num);
T* mem_fw_cross_square_sum = mem_fw_cross_mean_sum + embedding_size * field_num * fw_field_num;
int32_t* mem_fw_field_map =
reinterpret_cast<int*>(mem_fw_cross_square_sum + fw_field_num * embedding_size);
const T* local_fw_weight_data = fw_weight_tensor;
if (fw_weight_multil_flag) {
local_fw_weight_data = fw_weight_tensor + batch_id * fw_weight_size;
}
int32_t this_sample_feature_start_addr = sample_feature_start_addr[batch_id];
int32_t this_sample_feature_end_addr = sample_feature_start_addr[batch_id + 1];
int32_t this_sample_feature_num = this_sample_feature_end_addr - this_sample_feature_start_addr;
int32_t sample_0_feature_start_addr = sample_feature_start_addr[0];
int32_t sample_0_feature_end_addr = sample_feature_start_addr[1];
int32_t sample_0_feature_num = sample_0_feature_end_addr - sample_0_feature_start_addr;
ProcessSamplePart_share<T, warp_num>(
smem_fw_field_map, smem_fw_map_idx, smem_cross_mean_sum, smem_cross_square_sum,
mem_fw_cross_mean_sum, mem_fw_cross_square_sum, mem_fw_field_map, embedding_size, field_num,
fw_field_num, this_sample_feature_num, this_sample_feature_start_addr, sample_0_feature_num,
sample_0_feature_start_addr, weight_tensor, field_tensor, warp_num * embedding_size);
ProcessOutput_share<T, warp_num>(
smem_cross_mean_sum, smem_cross_square_sum, mem_fw_cross_mean_sum, mem_fw_cross_square_sum,
smem_fw_field_map, smem_fw_map_idx, fw_weight_tensor, output_tensor, batch_id,
embedding_size, field_num, fw_field_num);
}
namespace functor {
template <typename T>
int32_t ComputeSparseFwffm(hipStream_t stream, const void* const* input, T* output,
                           void* workspace, const int32_t fw_field_num,
const int32_t fw_weight_size, const bool fw_weight_multil_flag,
const int32_t sample_feature_size, const int32_t field_num,
const int32_t embedding_size, const int32_t batch_size) {
const T* weight_data = static_cast<const T*>(input[0]);
const T* fw_weight_data = static_cast<const T*>(input[1]);
const int32_t* field_data = static_cast<const int32_t*>(input[2]);
const int32_t* index_data = static_cast<const int32_t*>(input[3]);
const int32_t kThreadsPerBlock = 1024;
const size_t kBufferSize = field_num * fw_field_num * embedding_size;
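    // Workspace layout assumed by the pointer arithmetic below (the caller is expected
    // to size the buffer accordingly):
    //   int32_t sample_feature_start_addr[batch_size + 1]
    //   int32_t sample_feature_end_addr  [batch_size + 1]
    //   T       gmem_cross_sum           [batch_size * (field_num + 1) * embedding_size * fw_field_num]
    //   T       gmem_fw_cross_mean_sum   [field_num * embedding_size * fw_field_num]
    //   T       gmem_fw_cross_square_sum [embedding_size * fw_field_num]
    //   int32_t gmem_fw_field_map        [fw_field_num]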
    int32_t* sample_feature_start_addr = reinterpret_cast<int32_t*>(workspace);
int32_t* sample_feature_end_addr = sample_feature_start_addr + batch_size + 1;
T* gmem_cross_sum = reinterpret_cast<T*>(sample_feature_end_addr + batch_size + 1);
T* gmem_fw_cross_mean_sum = gmem_cross_sum + batch_size * (field_num + 1) * embedding_size * fw_field_num;
T* gmem_fw_cross_square_sum = gmem_fw_cross_mean_sum + embedding_size * field_num * fw_field_num;
int32_t* gmem_fw_field_map =
reinterpret_cast<int*>(gmem_fw_cross_square_sum + embedding_size * fw_field_num);
CUDA_CHECK(hipMemsetAsync(gmem_fw_cross_mean_sum, 0,
sizeof(float) * (embedding_size * field_num * fw_field_num +
embedding_size * fw_field_num), stream));
CUDA_CHECK(hipMemsetAsync(gmem_fw_field_map, -1, sizeof(int) * (fw_field_num), stream));
CUDA_CHECK(hipMemsetAsync(output, 0, sizeof(float) * (batch_size * embedding_size), stream));
CUDA_CHECK(hipMemsetAsync(sample_feature_start_addr, 0, sizeof(int32_t) * (batch_size + 1), stream));
CUDA_CHECK(hipMemsetAsync(sample_feature_end_addr, 0, sizeof(int32_t) * (batch_size + 1), stream));
hipLaunchKernelGGL(( ComputeBatchBoundary), dim3(DIVUP(sample_feature_size, 1024)), dim3(1024), 0, stream,
index_data, sample_feature_size, batch_size, sample_feature_start_addr,
sample_feature_end_addr);
constexpr int32_t warp_num = 32;
dim3 block(32, 32);
dim3 grid0(1);
hipLaunchKernelGGL(( ProcessCommonPart<T, 32>), dim3(grid0), dim3(block), 0, stream,
embedding_size, field_num, fw_field_num, sample_feature_start_addr, weight_data, field_data,
gmem_fw_cross_mean_sum, gmem_fw_cross_square_sum, gmem_fw_field_map);
int32_t share_mem_size = (fw_field_num * 3 + 1) * sizeof(int32_t) +
embedding_size * (field_num + 1) * fw_field_num * sizeof(T);
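    // If the per-sample cross sums fit in the 64 KiB dynamic shared memory budget, use
    // the shared-memory kernel; otherwise broadcast the common sums into each sample's
    // global slice and fall back to the global-memory kernel.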
if (share_mem_size < 65536) {
hipFuncSetAttribute(ProcessFwffmOutput_share<T, warp_num>,
hipFuncAttributeMaxDynamicSharedMemorySize, 65536);
dim3 grid(batch_size);
hipLaunchKernelGGL(( ProcessFwffmOutput_share<T, warp_num>), dim3(grid), dim3(block), share_mem_size, stream,
embedding_size, field_num, fw_field_num, fw_weight_multil_flag,
sample_feature_start_addr, sample_feature_end_addr, weight_data, field_data,
fw_weight_data, output, gmem_cross_sum);
} else {
// printf("Do not use shared memory.\n");
dim3 block_set(embedding_size);
dim3 grid_set(batch_size * fw_field_num);
hipLaunchKernelGGL(( BroadcastCommonPart<T>), dim3(grid_set), dim3(block_set), 0, stream,
batch_size, embedding_size, field_num, fw_field_num, gmem_fw_cross_mean_sum,
gmem_fw_cross_square_sum, gmem_cross_sum);
int32_t shared_mem_required_bytes = (fw_field_num * (field_num + 1) + 1)* sizeof(int32_t);
dim3 grid(batch_size);
hipFuncSetAttribute(ProcessFwffmOutput<T, warp_num>,
hipFuncAttributeMaxDynamicSharedMemorySize, 65536);
hipLaunchKernelGGL(( ProcessFwffmOutput<T, warp_num>), dim3(grid), dim3(block), shared_mem_required_bytes, stream,
embedding_size, field_num, fw_field_num, fw_weight_multil_flag,
sample_feature_start_addr, sample_feature_end_addr, weight_data, field_data,
fw_weight_data, output, gmem_cross_sum);
}
return 1;
}
template int32_t ComputeSparseFwffm(hipStream_t stream, const void* const* input, float* output,
                                    void* workspace, const int32_t fw_field_num,
const int32_t fw_weight_size, const bool fw_weight_multil_flag,
const int32_t sample_feature_size, const int32_t field_num,
const int32_t embedding_size, const int32_t batch_size);
template int32_t ComputeSparseFwffm(hipStream_t stream, const void* const* input, half* output,
                                    void* workspace, const int32_t fw_field_num,
const int32_t fw_weight_size, const bool fw_weight_multil_flag,
const int32_t sample_feature_size, const int32_t field_num,
const int32_t embedding_size, const int32_t batch_size);
} // namespace functor
} // namespace sparse_fwffm
} // namespace nvinfer1
|
91ad0ec6d32d417428800e06f28dcd6546aae47f.cu
|
// Copyright 2020, Tencent Inc.
// All rights reserved.
//
// @author shaorunwang <[email protected]>
#include <cuda.h>
#include <cuda_fp16.h>
#include <thrust/extrema.h>
#include <fstream>
#include <iostream>
//#include "NvInfer.h"
//#include "sparse_fwffm_plugin.h"
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m, n) (((m) / (n)) + ((m) % (n) > 0))
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int32_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
namespace nvinfer1 {
namespace sparse_fwffm {
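// Converts index_tensor (assumed to hold a non-decreasing sample id per feature row)
// into prefix boundaries: the rows of sample b end up in
// [sample_feature_start_addr[b], sample_feature_start_addr[b + 1]).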
__global__ void ComputeBatchBoundary(const int32_t* index_tensor, int32_t total_feature_num,
int32_t batch_size, int32_t* sample_feature_start_addr,
int32_t* sample_feature_end_addr) {
int32_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < total_feature_num) {
int32_t idx = index_tensor[tid];
// atomicMin(sample_feature_start_addr + idx, tid);
// atomicMax(sample_feature_end_addr + idx, tid + 1);
if (tid > 0) {
int32_t pre_idx = index_tensor[tid - 1];
for (int32_t i = idx; i > pre_idx; --i) {
sample_feature_start_addr[i] = tid;
}
} else {
int32_t first_idx = index_tensor[0];
for (int32_t i = 0; i <= first_idx; ++i) {
sample_feature_start_addr[i] = 0;
}
int32_t last_idx = index_tensor[total_feature_num - 1];
for (int32_t i = batch_size - 1; i > last_idx; --i) {
sample_feature_start_addr[i] = total_feature_num;
}
sample_feature_start_addr[batch_size] = total_feature_num;
}
}
}
template <typename T = float, int32_t warp_num = 32>
__global__ void ProcessCommonPart(int32_t embedding_size, int32_t field_num, int32_t fw_field_num,
int32_t* sample_feature_start_addr, const T* weight_tensor,
const int32_t* field_tensor, T* gmem_fw_cross_mean_sum,
T* gmem_fw_cross_square_sum, int32_t* gmem_fw_field_map) {
int32_t warp_id = threadIdx.y;
int32_t lane_id = threadIdx.x;
int32_t global_warp_id = blockIdx.x * warp_num + warp_id;
int32_t total_global_warp_num = gridDim.x * warp_num;
int32_t tid = threadIdx.x + threadIdx.y * blockDim.x;
int32_t total_thread = warp_num * 32;
int32_t common_feature_num = sample_feature_start_addr[0];
for (int32_t wid = global_warp_id; wid < common_feature_num; wid += total_global_warp_num) {
int32_t field_1 = field_tensor[wid * 2] - 1;
int32_t fw_field_1 = field_tensor[wid * 2 + 1] - 1;
if (fw_field_1 < 0 || fw_field_1 >= fw_field_num || field_1 < 0 || field_1 >= field_num)
continue;
if (lane_id == 0) gmem_fw_field_map[fw_field_1] = field_1;
#pragma unroll
for (int32_t field_2 = 0; field_2 < field_num; field_2++) {
int32_t mem_field_offset = (field_2 * fw_field_num + fw_field_1) * embedding_size;
#pragma unroll
for (int32_t n = 0; n < (embedding_size + 31) / 32 * 32; n += 32) {
if (n + lane_id < embedding_size) {
T reg = weight_tensor[wid * embedding_size * field_num +
field_2 * embedding_size + n + lane_id];
atomicAdd(gmem_fw_cross_mean_sum + mem_field_offset + n + lane_id, reg);
}
}
}
int32_t mem_field_offset = fw_field_1 * embedding_size;
#pragma unroll
for (int32_t n = 0; n < (embedding_size + 31) / 32 * 32; n += 32) {
if (n + lane_id < embedding_size) {
T reg = weight_tensor[wid * embedding_size * field_num + field_1 * embedding_size +
n + lane_id];
float square = reg * reg;
atomicAdd(gmem_fw_cross_square_sum + mem_field_offset + n + lane_id, square);
}
}
}
}
template <typename T = float>
__global__ void BroadcastCommonPart(int32_t batch, int32_t embedding_size, int32_t field_num,
int32_t fw_field_num, T* gmem_fw_cross_mean_sum,
T* gmem_fw_cross_square_sum, T* output) {
int32_t lane_id = threadIdx.x;
int32_t fw_field_id = blockIdx.x % fw_field_num;
int32_t tid = lane_id + fw_field_id * embedding_size;
int32_t bid = blockIdx.x / fw_field_num;
// printf("bid : %d, tid: %d\n",bid, tid);
T Reg_square = gmem_fw_cross_square_sum[tid];
output[bid * (embedding_size * (field_num + 1) * fw_field_num) +
embedding_size * field_num * fw_field_num + tid] = Reg_square;
T Reg_mean_0 = gmem_fw_cross_mean_sum[tid];
T Reg_mean_1 = gmem_fw_cross_mean_sum[embedding_size * fw_field_num + tid];
output[bid * (embedding_size * (field_num + 1) * fw_field_num) + tid] = Reg_mean_0;
output[bid * (embedding_size * (field_num + 1) * fw_field_num) + embedding_size * fw_field_num +
tid] = Reg_mean_1;
}
template <typename T = float, int32_t warp_num = 32>
__device__ void ProcessSamplePart(
int32_t* smem_fw_field_map, int32_t* smem_fw_map_idx, T* gmem_cross_mean_sum,
T* gmem_cross_square_sum, T* gmem_fw_cross_mean_sum, T* gmem_fw_cross_square_sum,
int32_t* gmem_fw_field_map, int32_t embedding_size, int32_t field_num, int32_t fw_field_num,
int32_t this_sample_feature_num, int32_t this_sample_feature_start_addr,
int32_t sample_0_feature_num, int32_t sample_0_feature_start_addr, const T* weight_tensor,
const int* field_tensor, int32_t shared_mem_elements) {
constexpr int32_t total_thread = warp_num * 32;
int32_t warp_id = threadIdx.y;
int32_t lane_id = threadIdx.x;
int32_t tid = threadIdx.x + threadIdx.y * 32;
for (int32_t i = 0; i < fw_field_num; i += warp_num * 32) {
if (i + tid < fw_field_num) {
smem_fw_field_map[i + tid] = gmem_fw_field_map[i + tid];
}
}
for (int32_t i = fw_field_num; i < fw_field_num * 2; i += warp_num * 32) {
if (i + tid < fw_field_num * 2) {
smem_fw_field_map[i + tid] = -1;
}
}
__syncthreads();
// patch
if (blockIdx.x != 0) {
int32_t sample_0_start_row = warp_id + sample_0_feature_start_addr;
int32_t sample_0_end_row = sample_0_feature_num + sample_0_feature_start_addr;
for (int32_t wid = sample_0_start_row; wid < sample_0_end_row; wid += warp_num) {
int32_t field_1 = field_tensor[wid * 2] - 1;
int32_t fw_field_1 = field_tensor[wid * 2 + 1] - 1;
if (fw_field_1 < 0 || fw_field_1 >= fw_field_num || field_1 < 0 || field_1 >= field_num)
continue;
if (lane_id == 0) smem_fw_field_map[fw_field_1] = field_1;
}
}
// sample feature phase
int32_t common_fw_field_map_offset_for_ad = blockIdx.x > 0 ? fw_field_num : 0;
int32_t sample_start_row = warp_id + this_sample_feature_start_addr;
int32_t sample_end_row = this_sample_feature_num + this_sample_feature_start_addr;
for (int32_t wid = sample_start_row; wid < sample_end_row; wid += warp_num) {
int32_t field_1 = field_tensor[wid * 2] - 1;
int32_t fw_field_1 = field_tensor[wid * 2 + 1] - 1;
if (fw_field_1 < 0 || fw_field_1 >= fw_field_num || field_1 < 0 || field_1 >= field_num)
continue;
if (lane_id == 0) {
smem_fw_field_map[fw_field_1 + common_fw_field_map_offset_for_ad] = field_1;
}
#pragma unroll
for (int32_t field_2 = 0; field_2 < field_num; field_2++) {
int32_t mem_field_offset = (field_2 * fw_field_num + fw_field_1) * embedding_size;
#pragma unroll
for (int32_t n = 0; n < (embedding_size + 31) / 32 * 32; n += 32) {
T reg = T(0);
int32_t rd_offset =
wid * embedding_size * field_num + field_2 * embedding_size + n + lane_id;
T* wr_ptr = gmem_cross_mean_sum + mem_field_offset + n + lane_id;
if (n + lane_id < embedding_size) {
reg = weight_tensor[rd_offset];
atomicAdd(wr_ptr, reg);
}
}
}
int32_t mem_field_offset = fw_field_1 * embedding_size;
#pragma unroll
for (int32_t n = 0; n < (embedding_size + 31) / 32 * 32; n += 32) {
T reg = T(0);
T square = T(0);
int32_t rd_offset =
wid * embedding_size * field_num + field_1 * embedding_size + n + lane_id;
T* wr_ptr = gmem_cross_square_sum + mem_field_offset + n + lane_id;
if (n + lane_id < embedding_size) {
reg = weight_tensor[rd_offset];
square = reg * reg;
atomicAdd(wr_ptr, square);
}
}
}
__syncthreads();
for (int32_t i = 0; i < fw_field_num; i += total_thread) {
if (i + tid < fw_field_num) {
int32_t field_1 = smem_fw_field_map[common_fw_field_map_offset_for_ad + i + tid];
int32_t field_1_part1 = smem_fw_field_map[i + tid];
if (field_1 < 0 && field_1_part1 != 1) {
field_1 = field_1_part1;
}
smem_fw_field_map[i + tid] = field_1;
}
}
__syncthreads();
if (warp_id == 0 && lane_id == 0) {
int32_t cnt = 0;
for (int32_t i = 0; i < fw_field_num; i++) {
if (smem_fw_field_map[i] >= 0) {
smem_fw_map_idx[cnt++] = i;
}
}
smem_fw_map_idx[fw_field_num] = cnt;
}
__syncthreads();
}
template <typename T = float, int32_t warp_num = 32>
__device__ void ProcessSamplePart_share(
int32_t* smem_fw_field_map, int32_t* smem_fw_map_idx, T* smem_cross_mean_sum,
T* smem_cross_square_sum, T* gmem_fw_cross_mean_sum, T* gmem_fw_cross_square_sum,
int32_t* gmem_fw_field_map, int32_t embedding_size, int32_t field_num, int32_t fw_field_num,
int32_t this_sample_feature_num, int32_t this_sample_feature_start_addr,
int32_t sample_0_feature_num, int32_t sample_0_feature_start_addr, const T* weight_tensor,
const int* field_tensor, int32_t shared_mem_elements) {
constexpr int32_t total_thread = warp_num * 32;
int32_t warp_id = threadIdx.y;
int32_t lane_id = threadIdx.x;
int32_t tid = threadIdx.x + threadIdx.y * 32;
for (int32_t i = 0; i < fw_field_num; i += warp_num * 32) {
if (i + tid < fw_field_num) {
smem_fw_field_map[i + tid] = gmem_fw_field_map[i + tid];
}
}
for (int32_t i = fw_field_num; i < fw_field_num * 2; i += warp_num * 32) {
if (i + tid < fw_field_num * 2) {
smem_fw_field_map[i + tid] = -1;
}
}
for (int32_t i = tid; i < embedding_size * field_num * fw_field_num; i += warp_num * 32) {
smem_cross_mean_sum[i] = gmem_fw_cross_mean_sum[i];
if (i < embedding_size * fw_field_num) {
smem_cross_square_sum[i] = gmem_fw_cross_square_sum[i];
}
}
__syncthreads();
// patch
if (blockIdx.x != 0) {
int32_t sample_0_start_row = warp_id + sample_0_feature_start_addr;
int32_t sample_0_end_row = sample_0_feature_num + sample_0_feature_start_addr;
for (int32_t wid = sample_0_start_row; wid < sample_0_end_row; wid += warp_num) {
int32_t field_1 = field_tensor[wid * 2] - 1;
int32_t fw_field_1 = field_tensor[wid * 2 + 1] - 1;
if (fw_field_1 < 0 || fw_field_1 >= fw_field_num || field_1 < 0 || field_1 >= field_num)
continue;
if (lane_id == 0) smem_fw_field_map[fw_field_1] = field_1;
}
}
// sample feature phase
int32_t common_fw_field_map_offset_for_ad = blockIdx.x > 0 ? fw_field_num : 0;
int32_t sample_start_row = warp_id + this_sample_feature_start_addr;
int32_t sample_end_row = this_sample_feature_num + this_sample_feature_start_addr;
for (int32_t wid = sample_start_row; wid < sample_end_row; wid += warp_num) {
int32_t field_1 = field_tensor[wid * 2] - 1;
int32_t fw_field_1 = field_tensor[wid * 2 + 1] - 1;
if (fw_field_1 < 0 || fw_field_1 >= fw_field_num || field_1 < 0 || field_1 >= field_num)
continue;
if (lane_id == 0) {
smem_fw_field_map[fw_field_1 + common_fw_field_map_offset_for_ad] = field_1;
}
#pragma unroll
for (int32_t field_2 = 0; field_2 < field_num; field_2++) {
int32_t mem_field_offset = (field_2 * fw_field_num + fw_field_1) * embedding_size;
#pragma unroll
for (int32_t n = 0; n < (embedding_size + 31) / 32 * 32; n += 32) {
T reg = T(0);
int32_t rd_offset =
wid * embedding_size * field_num + field_2 * embedding_size + n + lane_id;
T* wr_ptr = smem_cross_mean_sum + mem_field_offset + n + lane_id;
if (n + lane_id < embedding_size) {
reg = weight_tensor[rd_offset];
atomicAdd(wr_ptr, reg);
}
}
}
int32_t mem_field_offset = fw_field_1 * embedding_size;
#pragma unroll
for (int32_t n = 0; n < (embedding_size + 31) / 32 * 32; n += 32) {
T reg = T(0);
T square = T(0);
int32_t rd_offset =
wid * embedding_size * field_num + field_1 * embedding_size + n + lane_id;
T* wr_ptr = smem_cross_square_sum + mem_field_offset + n + lane_id;
if (n + lane_id < embedding_size) {
reg = weight_tensor[rd_offset];
square = reg * reg;
atomicAdd(wr_ptr, square);
}
}
}
__syncthreads();
for (int32_t i = 0; i < fw_field_num; i += total_thread) {
if (i + tid < fw_field_num) {
int32_t field_1 = smem_fw_field_map[common_fw_field_map_offset_for_ad + i + tid];
int32_t field_1_part1 = smem_fw_field_map[i + tid];
if (field_1 < 0 && field_1_part1 != 1) {
field_1 = field_1_part1;
}
smem_fw_field_map[i + tid] = field_1;
}
}
__syncthreads();
if (warp_id == 0 && lane_id == 0) {
int32_t cnt = 0;
for (int32_t i = 0; i < fw_field_num; i++) {
if (smem_fw_field_map[i] >= 0) {
smem_fw_map_idx[cnt++] = i;
}
}
smem_fw_map_idx[fw_field_num] = cnt;
}
__syncthreads();
}
template <typename T = float, int32_t warp_num = 32>
__device__ void ProcessOutput_share(T* smem_cross_mean_sum, T* smem_cross_square_sum,
T* mem_fw_cross_mean_sum, T* mem_fw_cross_square_sum,
int32_t* smem_fw_field_map, int32_t* smem_fw_map_idx,
const T* fw_weight_tensor, T* output_gmem, int32_t batch_id,
int32_t embedding_size, int32_t field_num,
int32_t fw_field_num) {
int32_t warp_id = threadIdx.y;
int32_t lane_id = threadIdx.x;
int32_t weight_size_pad = (embedding_size + 31) / 32 * 32;
int32_t common_fw_field_map_offset_for_ad = blockIdx.x > 0 ? fw_field_num : 0;
// T output_accu[(embedding_size + 31) / 32] = {0};
T output_accu[4] = {0};
    int32_t total_valid_fw_field = smem_fw_map_idx[fw_field_num];
    for (int32_t fw_field_1_idx = warp_id; fw_field_1_idx < total_valid_fw_field;
fw_field_1_idx += warp_num) {
int32_t fw_field_1 = smem_fw_map_idx[fw_field_1_idx];
int32_t fw_iter = (2 + fw_field_1) * (fw_field_1 + 1) / 2 - (fw_field_1 + 1);
int32_t field_1 = smem_fw_field_map[fw_field_1];
for (int32_t fw_field_2_idx = 0; fw_field_2_idx < fw_field_1_idx; fw_field_2_idx++) {
int32_t fw_field_2 = smem_fw_map_idx[fw_field_2_idx];
int32_t field_2 = smem_fw_field_map[fw_field_2];
T fw_weight_reg = fw_weight_tensor[fw_iter + fw_field_2] + T(1);
int32_t index_1 = (field_1 * fw_field_num + fw_field_2) * embedding_size;
int32_t index_2 = (field_2 * fw_field_num + fw_field_1) * embedding_size;
for (int32_t n = 0; n < weight_size_pad; n += 32) {
T mean_index_1_sum = T(0);
T mean_index_2_sum = T(0);
if (n + lane_id < embedding_size) {
mean_index_1_sum = smem_cross_mean_sum[index_1 + n + lane_id];
mean_index_2_sum = smem_cross_mean_sum[index_2 + n + lane_id];
}
output_accu[n / 32] += mean_index_1_sum * mean_index_2_sum * fw_weight_reg;
}
}
T fw_weight_reg = fw_weight_tensor[fw_iter + fw_field_1] + T(1);
int32_t index_1 = (field_1 * fw_field_num + fw_field_1) * embedding_size;
for (int32_t n = 0; n < weight_size_pad; n += 32) {
T cross_mean_sum = T(0);
T cross_square_sum = T(0);
if (n + lane_id < embedding_size) {
cross_mean_sum = smem_cross_mean_sum[index_1 + n + lane_id];
cross_square_sum = smem_cross_square_sum[fw_field_1 * embedding_size + n + lane_id];
}
output_accu[n / 32] +=
T(0.5) * (cross_mean_sum * cross_mean_sum - cross_square_sum) * fw_weight_reg;
}
}
for (int32_t n = 0; n < weight_size_pad; n += 32) {
if (n + lane_id < embedding_size) {
T* Outptr = (output_gmem + batch_id * embedding_size + n + lane_id);
atomicAdd(Outptr, output_accu[n / 32]);
}
}
}
template <typename T = float, int32_t warp_num = 32>
__device__ void ProcessOutput(T* mem_cross_mean_sum, T* mem_cross_square_sum,
T* mem_fw_cross_mean_sum, T* mem_fw_cross_square_sum,
int32_t* smem_fw_field_map, int32_t* smem_fw_map_idx,
const T* fw_weight_tensor, T* output_gmem, int32_t batch_id,
int32_t embedding_size, int32_t field_num, int32_t fw_field_num) {
int32_t warp_id = threadIdx.y;
int32_t lane_id = threadIdx.x;
int32_t weight_size_pad = (embedding_size + 31) / 32 * 32;
int32_t common_fw_field_map_offset_for_ad = blockIdx.x > 0 ? fw_field_num : 0;
// T output_accu[(embedding_size + 31) / 32] = {0};
T output_accu[6] = { 0 };
    int32_t total_valid_fw_field = smem_fw_map_idx[fw_field_num];
    for (int32_t fw_field_1_idx = warp_id; fw_field_1_idx < total_valid_fw_field;
fw_field_1_idx += warp_num) {
int32_t fw_field_1 = smem_fw_map_idx[fw_field_1_idx];
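        // Offset of row fw_field_1 in the packed lower-triangular fw weight matrix:
        // (2 + f) * (f + 1) / 2 - (f + 1) simplifies to f * (f + 1) / 2.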
int32_t fw_iter = (2 + fw_field_1) * (fw_field_1 + 1) / 2 - (fw_field_1 + 1);
int32_t field_1 = smem_fw_field_map[fw_field_1];
for (int32_t fw_field_2_idx = 0; fw_field_2_idx < fw_field_1_idx; fw_field_2_idx++) {
int32_t fw_field_2 = smem_fw_map_idx[fw_field_2_idx];
int32_t field_2 = smem_fw_field_map[fw_field_2];
T fw_weight_reg = fw_weight_tensor[fw_iter + fw_field_2] + T(1);
int32_t index_1 = (field_1 * fw_field_num + fw_field_2) * embedding_size;
int32_t index_2 = (field_2 * fw_field_num + fw_field_1) * embedding_size;
for (int32_t n = 0; n < weight_size_pad; n += 32) {
T mean_index_1_sum = T(0);
T mean_index_2_sum = T(0);
if (n + lane_id < embedding_size) {
mean_index_1_sum = mem_cross_mean_sum[index_1 + n + lane_id];
mean_index_2_sum = mem_cross_mean_sum[index_2 + n + lane_id];
}
output_accu[n / 32] += mean_index_1_sum * mean_index_2_sum * fw_weight_reg;
}
}
T fw_weight_reg = fw_weight_tensor[fw_iter + fw_field_1] + T(1);
int32_t index_1 = (field_1 * fw_field_num + fw_field_1) * embedding_size;
for (int32_t n = 0; n < weight_size_pad; n += 32) {
T cross_mean_sum = T(0);
T cross_square_sum = T(0);
if (n + lane_id < embedding_size) {
cross_mean_sum = mem_cross_mean_sum[index_1 + n + lane_id];
cross_square_sum = mem_cross_square_sum[fw_field_1 * embedding_size + n + lane_id];
}
output_accu[n / 32] +=
T(0.5) * (cross_mean_sum * cross_mean_sum - cross_square_sum) * fw_weight_reg;
}
}
for (int32_t n = 0; n < weight_size_pad; n += 32) {
if (n + lane_id < embedding_size) {
T* Outptr = (output_gmem + batch_id * embedding_size + n + lane_id);
atomicAdd(Outptr, output_accu[n / 32]);
}
}
}
template <typename T = float, int32_t warp_num = 32>
__global__ void ProcessFwffmOutput(int32_t embedding_size, int32_t field_num, int32_t fw_field_num,
bool fw_weight_multil_flag, int32_t* sample_feature_start_addr,
int32_t* sample_feature_end_addr, const T* weight_tensor,
const int32_t* field_tensor, const T* fw_weight_tensor,
T* output_tensor, T* workspace)
{
int32_t batch_size = gridDim.x;
int32_t warp_id = threadIdx.y;
extern __shared__ float smem_pool[];
int32_t batch_id = blockIdx.x;
int32_t fw_weight_size = (fw_field_num + 1) * fw_field_num / 2;
int32_t* smem_fw_field_map = reinterpret_cast<int32_t*>(smem_pool);
int32_t* smem_fw_map_idx = smem_fw_field_map + 2 * fw_field_num;
    // Use global memory in case atomicAdd on float is not available in shared memory
T* mem_cross_mean_sum =
workspace + batch_id * (embedding_size * (field_num + 1) * fw_field_num);
T* mem_cross_square_sum = mem_cross_mean_sum + embedding_size * field_num * fw_field_num;
T* mem_fw_cross_mean_sum =
workspace + batch_size * (embedding_size * (field_num + 1) * fw_field_num);
T* mem_fw_cross_square_sum = mem_fw_cross_mean_sum + embedding_size * field_num * fw_field_num;
int32_t* mem_fw_field_map =
reinterpret_cast<int*>(mem_fw_cross_square_sum + fw_field_num * embedding_size);
const T* local_fw_weight_data = fw_weight_tensor;
if (fw_weight_multil_flag) {
local_fw_weight_data = fw_weight_tensor + batch_id * fw_weight_size;
}
int32_t this_sample_feature_start_addr = sample_feature_start_addr[batch_id];
int32_t this_sample_feature_end_addr = sample_feature_start_addr[batch_id + 1];
int32_t this_sample_feature_num = this_sample_feature_end_addr - this_sample_feature_start_addr;
int32_t sample_0_feature_start_addr = sample_feature_start_addr[0];
int32_t sample_0_feature_end_addr = sample_feature_start_addr[1];
int32_t sample_0_feature_num = sample_0_feature_end_addr - sample_0_feature_start_addr;
ProcessSamplePart<T, warp_num>(
smem_fw_field_map, smem_fw_map_idx, mem_cross_mean_sum, mem_cross_square_sum,
mem_fw_cross_mean_sum, mem_fw_cross_square_sum, mem_fw_field_map, embedding_size, field_num,
fw_field_num, this_sample_feature_num, this_sample_feature_start_addr, sample_0_feature_num,
sample_0_feature_start_addr, weight_tensor, field_tensor, warp_num * embedding_size);
ProcessOutput<T, warp_num>(mem_cross_mean_sum, mem_cross_square_sum, mem_fw_cross_mean_sum,
mem_fw_cross_square_sum, smem_fw_field_map, smem_fw_map_idx,
local_fw_weight_data, output_tensor, batch_id, embedding_size, field_num,
fw_field_num);
}
template <typename T = float, int32_t warp_num = 32>
__global__ void ProcessFwffmOutput_share(int32_t embedding_size, int32_t field_num,
int32_t fw_field_num, bool fw_weight_multil_flag,
int32_t* sample_feature_start_addr,
int32_t* sample_feature_end_addr, const T* weight_tensor,
const int32_t* field_tensor, const T* fw_weight_tensor,
T* output_tensor, T* workspace) {
int32_t batch_size = gridDim.x;
int32_t warp_id = threadIdx.y;
extern __shared__ float smem_pool[];
int32_t batch_id = blockIdx.x;
int32_t fw_weight_size = (fw_field_num + 1) * fw_field_num / 2;
int32_t* smem_fw_field_map = reinterpret_cast<int32_t*>(smem_pool);
int32_t* smem_fw_map_idx = smem_fw_field_map + 2 * fw_field_num;
    // This shared-memory variant stages the cross sums in shared memory rather than the global workspace
T* smem_cross_mean_sum = reinterpret_cast<T*>(smem_fw_map_idx + fw_field_num + 1);
T* smem_cross_square_sum = smem_cross_mean_sum + embedding_size * field_num * fw_field_num;
T* mem_fw_cross_mean_sum =
workspace + batch_size * (embedding_size * (field_num + 1) * fw_field_num);
T* mem_fw_cross_square_sum = mem_fw_cross_mean_sum + embedding_size * field_num * fw_field_num;
int32_t* mem_fw_field_map =
reinterpret_cast<int*>(mem_fw_cross_square_sum + fw_field_num * embedding_size);
const T* local_fw_weight_data = fw_weight_tensor;
if (fw_weight_multil_flag) {
local_fw_weight_data = fw_weight_tensor + batch_id * fw_weight_size;
}
int32_t this_sample_feature_start_addr = sample_feature_start_addr[batch_id];
int32_t this_sample_feature_end_addr = sample_feature_start_addr[batch_id + 1];
int32_t this_sample_feature_num = this_sample_feature_end_addr - this_sample_feature_start_addr;
int32_t sample_0_feature_start_addr = sample_feature_start_addr[0];
int32_t sample_0_feature_end_addr = sample_feature_start_addr[1];
int32_t sample_0_feature_num = sample_0_feature_end_addr - sample_0_feature_start_addr;
ProcessSamplePart_share<T, warp_num>(
smem_fw_field_map, smem_fw_map_idx, smem_cross_mean_sum, smem_cross_square_sum,
mem_fw_cross_mean_sum, mem_fw_cross_square_sum, mem_fw_field_map, embedding_size, field_num,
fw_field_num, this_sample_feature_num, this_sample_feature_start_addr, sample_0_feature_num,
sample_0_feature_start_addr, weight_tensor, field_tensor, warp_num * embedding_size);
ProcessOutput_share<T, warp_num>(
smem_cross_mean_sum, smem_cross_square_sum, mem_fw_cross_mean_sum, mem_fw_cross_square_sum,
smem_fw_field_map, smem_fw_map_idx, fw_weight_tensor, output_tensor, batch_id,
embedding_size, field_num, fw_field_num);
}
namespace functor {
template <typename T>
int32_t ComputeSparseFwffm(cudaStream_t stream, const void* const* input, T* output,
                           void* workspace, const int32_t fw_field_num,
const int32_t fw_weight_size, const bool fw_weight_multil_flag,
const int32_t sample_feature_size, const int32_t field_num,
const int32_t embedding_size, const int32_t batch_size) {
const T* weight_data = static_cast<const T*>(input[0]);
const T* fw_weight_data = static_cast<const T*>(input[1]);
const int32_t* field_data = static_cast<const int32_t*>(input[2]);
const int32_t* index_data = static_cast<const int32_t*>(input[3]);
const int32_t kThreadsPerBlock = 1024;
const size_t kBufferSize = field_num * fw_field_num * embedding_size;
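    // Workspace layout assumed below: two int32_t arrays of batch_size + 1 entries,
    // then batch_size * (field_num + 1) * embedding_size * fw_field_num elements of T
    // for the per-sample cross sums, then the common mean and square sums
    // ((field_num + 1) * embedding_size * fw_field_num elements of T) and fw_field_num
    // int32_t entries for the fw_field -> field map.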
    int32_t* sample_feature_start_addr = reinterpret_cast<int32_t*>(workspace);
int32_t* sample_feature_end_addr = sample_feature_start_addr + batch_size + 1;
T* gmem_cross_sum = reinterpret_cast<T*>(sample_feature_end_addr + batch_size + 1);
T* gmem_fw_cross_mean_sum = gmem_cross_sum + batch_size * (field_num + 1) * embedding_size * fw_field_num;
T* gmem_fw_cross_square_sum = gmem_fw_cross_mean_sum + embedding_size * field_num * fw_field_num;
int32_t* gmem_fw_field_map =
reinterpret_cast<int*>(gmem_fw_cross_square_sum + embedding_size * fw_field_num);
CUDA_CHECK(cudaMemsetAsync(gmem_fw_cross_mean_sum, 0,
sizeof(float) * (embedding_size * field_num * fw_field_num +
embedding_size * fw_field_num), stream));
CUDA_CHECK(cudaMemsetAsync(gmem_fw_field_map, -1, sizeof(int) * (fw_field_num), stream));
CUDA_CHECK(cudaMemsetAsync(output, 0, sizeof(float) * (batch_size * embedding_size), stream));
CUDA_CHECK(cudaMemsetAsync(sample_feature_start_addr, 0, sizeof(int32_t) * (batch_size + 1), stream));
CUDA_CHECK(cudaMemsetAsync(sample_feature_end_addr, 0, sizeof(int32_t) * (batch_size + 1), stream));
ComputeBatchBoundary<<<DIVUP(sample_feature_size, 1024), 1024, 0, stream>>>(
index_data, sample_feature_size, batch_size, sample_feature_start_addr,
sample_feature_end_addr);
constexpr int32_t warp_num = 32;
dim3 block(32, 32);
dim3 grid0(1);
ProcessCommonPart<T, 32><<<grid0, block, 0, stream>>>(
embedding_size, field_num, fw_field_num, sample_feature_start_addr, weight_data, field_data,
gmem_fw_cross_mean_sum, gmem_fw_cross_square_sum, gmem_fw_field_map);
int32_t share_mem_size = (fw_field_num * 3 + 1) * sizeof(int32_t) +
embedding_size * (field_num + 1) * fw_field_num * sizeof(T);
if (share_mem_size < 65536) {
cudaFuncSetAttribute(ProcessFwffmOutput_share<T, warp_num>,
cudaFuncAttributeMaxDynamicSharedMemorySize, 65536);
dim3 grid(batch_size);
ProcessFwffmOutput_share<T, warp_num><<<grid, block, share_mem_size, stream>>>(
embedding_size, field_num, fw_field_num, fw_weight_multil_flag,
sample_feature_start_addr, sample_feature_end_addr, weight_data, field_data,
fw_weight_data, output, gmem_cross_sum);
} else {
// printf("Do not use shared memory.\n");
dim3 block_set(embedding_size);
dim3 grid_set(batch_size * fw_field_num);
BroadcastCommonPart<T><<<grid_set, block_set, 0, stream>>>(
batch_size, embedding_size, field_num, fw_field_num, gmem_fw_cross_mean_sum,
gmem_fw_cross_square_sum, gmem_cross_sum);
int32_t shared_mem_required_bytes = (fw_field_num * (field_num + 1) + 1)* sizeof(int32_t);
dim3 grid(batch_size);
cudaFuncSetAttribute(ProcessFwffmOutput<T, warp_num>,
cudaFuncAttributeMaxDynamicSharedMemorySize, 65536);
ProcessFwffmOutput<T, warp_num><<<grid, block, shared_mem_required_bytes, stream>>>(
embedding_size, field_num, fw_field_num, fw_weight_multil_flag,
sample_feature_start_addr, sample_feature_end_addr, weight_data, field_data,
fw_weight_data, output, gmem_cross_sum);
}
return 1;
}
template int32_t ComputeSparseFwffm(cudaStream_t stream, const void* const* input, float* output,
                                    void* workspace, const int32_t fw_field_num,
const int32_t fw_weight_size, const bool fw_weight_multil_flag,
const int32_t sample_feature_size, const int32_t field_num,
const int32_t embedding_size, const int32_t batch_size);
template int32_t ComputeSparseFwffm(cudaStream_t stream, const void* const* input, half* output,
                                    void* workspace, const int32_t fw_field_num,
const int32_t fw_weight_size, const bool fw_weight_multil_flag,
const int32_t sample_feature_size, const int32_t field_num,
const int32_t embedding_size, const int32_t batch_size);
} // namespace functor
} // namespace sparse_fwffm
} // namespace nvinfer1
|
e7f7200c5fd1d2161aff36689ff0ad8052210cb3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
*notice, this list of conditions and the following disclaimer in the
*documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its
*contributors may be used to endorse or promote products derived from this
*software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT,
*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 *OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
*EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/tensor_fill.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
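// Each thread walks the iterator across the full output tile and stores the value
// (iter + 1) wherever its predicates allow a write; the host-side footprint check
// below then treats non-zero elements as written and zero elements as untouched.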
template <typename TileIterator>
__global__ void kernel_store_iterator(typename TileIterator::Params params,
typename TileIterator::TensorRef ref,
cutlass::MatrixCoord extent) {
TileIterator iterator(params, ref.data(), extent, threadIdx.x, {0, 0});
typename TileIterator::Fragment fragment;
CUTLASS_PRAGMA_NO_UNROLL
for (int iter = 0; iter < TileIterator::ThreadMap::Count::kTile; ++iter) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < TileIterator::Fragment::kElements; ++i) {
typename TileIterator::Element tidx(iter + 1);
fragment[i] = tidx;
}
iterator.store(fragment);
++iterator;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
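// Verifies the store footprint: every element inside `extent` must have been written
// (non-zero) and every element outside it must still be zero.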
template <typename T, typename Layout>
static bool verify_footprint(cutlass::TensorView<T, Layout> view,
cutlass::MatrixCoord extent) {
for (int r = 0; r < view.extent().row(); ++r) {
for (int c = 0; c < view.extent().column(); ++c) {
cutlass::MatrixCoord coord{r, c};
bool within = coord < extent;
if (within) {
if (view.at(coord) == T(0)) {
return false;
}
} else {
if (view.at(coord) != T(0)) {
return false;
}
}
}
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, tensor_op_64x64x32_64x64x8) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess =
128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 32;
//
// The following tests were used to develop the OutputTileOptimalThreadMap
// metaprogram. The definitions in the disabled blocks of code in this and
// the following tests are hand-written quantities. They are expected to
// match what is defined in the ThreadMap.
//
#if 1
using ThreadMap =
cutlass::epilogue::threadblock::OutputTileOptimalThreadMap<
cutlass::epilogue::threadblock::OutputTileShape<64, 8, 1, 1,
1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 8, 1, 1,
8>,
kThreads, kElementsPerAccess,
cutlass::sizeof_bits<Element>::value>;
#else
using InternalThreadMap =
cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<64, 64>, kThreads,
kElementsPerAccess>;
using Shape =
cutlass::epilogue::threadblock::OutputTileShape<64, // column
8, // row
1, // group
1, // cluster
1 // iterations
>;
using Iterations =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
4, // row
1, // group
1, // cluster
1 // iterations
>;
using Delta =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
2, // row
1, // group
1, // cluster
1 // iterations
>;
using Count =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
8, // row
1, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap, Shape, Iterations, Delta, Count>;
#endif
using PredicatedTileIterator =
cutlass::epilogue::threadblock::PredicatedTileIterator<ThreadMap,
Element>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{64, 64};
cutlass::MatrixCoord output_extent{62, 56};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(
host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(kThreads, 1);
hipLaunchKernelGGL(( test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator>)
, dim3(grid), dim3(block), 0, 0, iterator_params, host_tensor.device_ref(),
output_extent);
hipError_t result = hipDeviceSynchronize();
ASSERT_EQ(result, hipSuccess) << hipGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("tensor_op_64x64x32_64x64x8.csv");
output << host_tensor.host_view();
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, tensor_op_128x64x32_64x64x8) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess =
128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 64;
#if 1
using ThreadMap =
cutlass::epilogue::threadblock::OutputTileOptimalThreadMap<
cutlass::epilogue::threadblock::OutputTileShape<128, 8, 2,
1, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 8, 2, 1,
8>,
kThreads, kElementsPerAccess,
cutlass::sizeof_bits<Element>::value>;
#else
using InternalThreadMap =
cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<64, 128>, kThreads,
kElementsPerAccess>;
using Shape = cutlass::epilogue::threadblock::OutputTileShape<64, // column
8, // row
2, // group
1, // cluster
8 // tile
>;
using Iterations =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
2, // row
2, // group
1, // cluster
1 // iterations
>;
using Delta = cutlass::epilogue::threadblock::OutputTileShape<1, // column
4, // row
64, // group
1, // cluster
1 // tile
>;
using Count =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
8, // row
1, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap, Shape, Iterations, Delta, Count>;
#endif
using PredicatedTileIterator =
cutlass::epilogue::threadblock::PredicatedTileIterator<ThreadMap,
Element>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{128, 64};
cutlass::MatrixCoord output_extent{125, 56};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(
host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(kThreads, 1);
hipLaunchKernelGGL(( test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator>)
, dim3(grid), dim3(block), 0, 0, iterator_params, host_tensor.device_ref(),
output_extent);
hipError_t result = hipDeviceSynchronize();
ASSERT_EQ(result, hipSuccess) << hipGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("tensor_op_128x64x32_64x64x8.csv");
output << host_tensor.host_view();
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, tensor_op_128x256x32_64x64x8) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess =
128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 256;
#if 1
using ThreadMap =
cutlass::epilogue::threadblock::OutputTileOptimalThreadMap<
cutlass::epilogue::threadblock::OutputTileShape<256, 8, 2,
1, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 8, 2, 1,
8>,
kThreads, kElementsPerAccess,
cutlass::sizeof_bits<Element>::value>;
#else
using InternalThreadMap =
cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<256, 128>, kThreads,
kElementsPerAccess>;
using Shape =
cutlass::epilogue::threadblock::OutputTileShape<256, // column
8, // row
2, // group
1, // cluster
8 // tile
>;
using Iterations =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
2, // row
2, // group
1, // cluster
1 // iterations
>;
using Delta = cutlass::epilogue::threadblock::OutputTileShape<1, // column
4, // row
64, // group
1, // cluster
1 // tile
>;
using Count =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
8, // row
1, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap, Shape, Iterations, Delta, Count>;
#endif
using PredicatedTileIterator =
cutlass::epilogue::threadblock::PredicatedTileIterator<ThreadMap,
Element>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{128, 256};
cutlass::MatrixCoord output_extent{123, 252};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(
host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(kThreads, 1);
hipLaunchKernelGGL(( test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator>)
, dim3(grid), dim3(block), 0, 0, iterator_params, host_tensor.device_ref(),
output_extent);
hipError_t result = hipDeviceSynchronize();
ASSERT_EQ(result, hipSuccess) << hipGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("tensor_op_128x256x32_64x64x8.csv");
output << host_tensor.host_view();
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, volta_tensor_op_64x64x32_64x64x4) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess =
128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 32;
#if 1
using ThreadMap =
cutlass::epilogue::threadblock::OutputTileOptimalThreadMap<
cutlass::epilogue::threadblock::OutputTileShape<64, 2, 4, 1,
1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1,
8>,
kThreads, kElementsPerAccess,
cutlass::sizeof_bits<Element>::value>;
#else
using InternalThreadMap =
cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<64, 8>, kThreads,
kElementsPerAccess>;
using Shape =
cutlass::epilogue::threadblock::OutputTileShape<64, // column
2, // row
4, // group
1, // cluster
8 // iterations
>;
using Iterations =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
1, // row
4, // group
1, // cluster
1 // iterations
>;
using Delta =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
1, // row
8, // group
1, // cluster
1 // iterations
>;
using Count =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
4, // row
2, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap, Shape, Iterations, Delta, Count>;
#endif
using PredicatedTileIterator =
cutlass::epilogue::threadblock::PredicatedTileIterator<ThreadMap,
Element>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{64, 64};
cutlass::MatrixCoord output_extent{62, 56};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(
host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(kThreads, 1);
hipLaunchKernelGGL(( test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator>)
, dim3(grid), dim3(block), 0, 0, iterator_params, host_tensor.device_ref(),
output_extent);
hipError_t result = hipDeviceSynchronize();
ASSERT_EQ(result, hipSuccess) << hipGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("volta_tensor_op_64x64x32_64x64x4.csv");
output << host_tensor.host_view();
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, volta_tensor_op_64x128x32_32x64x4) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess =
128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 128;
#if 1
using ThreadMap =
cutlass::epilogue::threadblock::OutputTileOptimalThreadMap<
cutlass::epilogue::threadblock::OutputTileShape<128, 2, 4,
1, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1,
8>,
kThreads, kElementsPerAccess,
cutlass::sizeof_bits<Element>::value>;
#else
using InternalThreadMap =
cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<128, 8>, kThreads,
kElementsPerAccess>;
using Shape =
cutlass::epilogue::threadblock::OutputTileShape<128, // column
2, // row
2, // group
2, // cluster
8 // iterations
>;
using Iterations =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
1, // row
1, // group
2, // cluster
1 // iterations
>;
using Delta =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
1, // row
8, // group
32, // cluster
1 // iterations
>;
using Count =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
4, // row
4, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap, Shape, Iterations, Delta, Count>;
#endif
using PredicatedTileIterator =
cutlass::epilogue::threadblock::PredicatedTileIterator<ThreadMap,
Element>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{64, 128};
cutlass::MatrixCoord output_extent{57, 124};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(
host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(kThreads, 1);
hipLaunchKernelGGL(( test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator>)
, dim3(grid), dim3(block), 0, 0, iterator_params, host_tensor.device_ref(),
output_extent);
hipError_t result = hipDeviceSynchronize();
ASSERT_EQ(result, hipSuccess) << hipGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("volta_tensor_op_64x128x32_32x64x4.csv");
output << host_tensor.host_view();
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, volta_tensor_op_128x256x32_64x64x4) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess =
128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 256;
#if 1
using ThreadMap =
cutlass::epilogue::threadblock::OutputTileOptimalThreadMap<
cutlass::epilogue::threadblock::OutputTileShape<256, 2, 4,
2, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1,
8>,
kThreads, kElementsPerAccess,
cutlass::sizeof_bits<Element>::value>;
#else
using InternalThreadMap =
cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<256, 16>, kThreads,
kElementsPerAccess>;
using Shape =
cutlass::epilogue::threadblock::OutputTileShape<256, // column
2, // row
4, // group
2, // cluster
8 // iterations
>;
using Iterations =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
1, // row
2, // group
2, // cluster
1 // iterations
>;
using Delta =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
1, // row
16, // group
64, // cluster
1 // iterations
>;
using Count =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
4, // row
2, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap, Shape, Iterations, Delta, Count>;
#endif
using PredicatedTileIterator =
cutlass::epilogue::threadblock::PredicatedTileIterator<ThreadMap,
Element>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{128, 256};
cutlass::MatrixCoord output_extent{128, 256};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(
host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(kThreads, 1);
hipLaunchKernelGGL(( test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator>)
, dim3(grid), dim3(block), 0, 0, iterator_params, host_tensor.device_ref(),
output_extent);
hipError_t result = hipDeviceSynchronize();
ASSERT_EQ(result, hipSuccess) << hipGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed || true) {
std::ofstream output("volta_tensor_op_128x256x32_64x64x4.csv");
output << host_tensor.host_view();
}
}
TEST(PredicatedTileIterator, volta_tensor_op_256x128x32_64x64x4) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess =
128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 256;
using ThreadMap =
cutlass::epilogue::threadblock::OutputTileOptimalThreadMap<
cutlass::epilogue::threadblock::OutputTileShape<128, 2, 4,
4, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1,
8>,
kThreads, kElementsPerAccess,
cutlass::sizeof_bits<Element>::value>;
using PredicatedTileIterator =
cutlass::epilogue::threadblock::PredicatedTileIterator<ThreadMap,
Element>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{256, 128};
cutlass::MatrixCoord output_extent{256, 128};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(
host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(kThreads, 1);
hipLaunchKernelGGL(( test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator>)
, dim3(grid), dim3(block), 0, 0, iterator_params, host_tensor.device_ref(),
output_extent);
hipError_t result = hipDeviceSynchronize();
ASSERT_EQ(result, hipSuccess) << hipGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed || true) {
std::ofstream output("volta_tensor_op_256x128x32_64x64x4.csv");
output << host_tensor.host_view();
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, simt_32x64x8_32x64x1) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess =
32 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 32;
#if 1
using ThreadMap =
cutlass::epilogue::threadblock::OutputTileOptimalThreadMap<
cutlass::epilogue::threadblock::OutputTileShape<64, 1, 4, 1,
1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1,
8>,
kThreads, kElementsPerAccess,
cutlass::sizeof_bits<Element>::value>;
#else
using InternalThreadMap =
cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<64, 4>, kThreads,
kElementsPerAccess>;
using Shape =
cutlass::epilogue::threadblock::OutputTileShape<64, // column
1, // row
4, // group
1, // cluster
1 // iterations
>;
using Iterations =
cutlass::epilogue::threadblock::OutputTileShape<2, // column
1, // row
4, // group
1, // cluster
1 // iterations
>;
using Delta =
cutlass::epilogue::threadblock::OutputTileShape<32, // column
1, // row
4, // group
16, // cluster
1 // iterations
>;
using Count =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
4, // row
2, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap, Shape, Iterations, Delta, Count>;
#endif
using PredicatedTileIterator =
cutlass::epilogue::threadblock::PredicatedTileIterator<ThreadMap,
Element>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{32, 64};
cutlass::MatrixCoord output_extent{27, 63};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(
host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(kThreads, 1);
hipLaunchKernelGGL(( test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator>)
, dim3(grid), dim3(block), 0, 0, iterator_params, host_tensor.device_ref(),
output_extent);
hipError_t result = hipDeviceSynchronize();
ASSERT_EQ(result, hipSuccess) << hipGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("simt_32x64x8_32x64x1.csv");
output << host_tensor.host_view();
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, simt_128x128x8_32x64x1) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess =
32 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 256;
#if 1
using ThreadMap =
cutlass::epilogue::threadblock::OutputTileOptimalThreadMap<
cutlass::epilogue::threadblock::OutputTileShape<128, 1, 4,
4, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1,
8>,
kThreads, kElementsPerAccess,
cutlass::sizeof_bits<Element>::value>;
#else
using InternalThreadMap =
cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<128, 16>, kThreads,
kElementsPerAccess>;
using Shape =
cutlass::epilogue::threadblock::OutputTileShape<128, // column
1, // row
4, // group
4, // cluster
1 // iterations
>;
using Iterations =
cutlass::epilogue::threadblock::OutputTileShape<2, // column
1, // row
2, // group
4, // cluster
1 // iterations
>;
using Delta =
cutlass::epilogue::threadblock::OutputTileShape<32, // column
1, // row
8, // group
32, // cluster
1 // iterations
>;
using Count =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
4, // row
2, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap, Shape, Iterations, Delta, Count>;
#endif
using PredicatedTileIterator =
cutlass::epilogue::threadblock::PredicatedTileIterator<ThreadMap,
Element>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{128, 128};
cutlass::MatrixCoord output_extent{123, 121};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(
host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(kThreads, 1);
hipLaunchKernelGGL(( test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator>)
, dim3(grid), dim3(block), 0, 0, iterator_params, host_tensor.device_ref(),
output_extent);
hipError_t result = hipDeviceSynchronize();
ASSERT_EQ(result, hipSuccess) << hipGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("simt_128x128x8_32x64x1.csv");
output << host_tensor.host_view();
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
e7f7200c5fd1d2161aff36689ff0ad8052210cb3.cu
|
/***************************************************************************************************
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
*notice, this list of conditions and the following disclaimer in the
*documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its
*contributors may be used to endorse or promote products derived from this
*software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT,
*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
*OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
*EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/tensor_fill.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename TileIterator>
__global__ void kernel_store_iterator(typename TileIterator::Params params,
typename TileIterator::TensorRef ref,
cutlass::MatrixCoord extent) {
TileIterator iterator(params, ref.data(), extent, threadIdx.x, {0, 0});
typename TileIterator::Fragment fragment;
CUTLASS_PRAGMA_NO_UNROLL
for (int iter = 0; iter < TileIterator::ThreadMap::Count::kTile; ++iter) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < TileIterator::Fragment::kElements; ++i) {
typename TileIterator::Element tidx(iter + 1);
fragment[i] = tidx;
}
iterator.store(fragment);
++iterator;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T, typename Layout>
static bool verify_footprint(cutlass::TensorView<T, Layout> view,
cutlass::MatrixCoord extent) {
for (int r = 0; r < view.extent().row(); ++r) {
for (int c = 0; c < view.extent().column(); ++c) {
cutlass::MatrixCoord coord{r, c};
bool within = coord < extent;
if (within) {
if (view.at(coord) == T(0)) {
return false;
}
} else {
if (view.at(coord) != T(0)) {
return false;
}
}
}
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, tensor_op_64x64x32_64x64x8) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess =
128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 32;
//
// The following tests were used to develop the OutputTileOptimalThreadMap
// metaprogram. The definitions in the disabled blocks of code in this and
// the following tests are hand-written quantities. They are expected to
// match what is defined in the ThreadMap.
//
#if 1
using ThreadMap =
cutlass::epilogue::threadblock::OutputTileOptimalThreadMap<
cutlass::epilogue::threadblock::OutputTileShape<64, 8, 1, 1,
1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 8, 1, 1,
8>,
kThreads, kElementsPerAccess,
cutlass::sizeof_bits<Element>::value>;
#else
using InternalThreadMap =
cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<64, 64>, kThreads,
kElementsPerAccess>;
using Shape =
cutlass::epilogue::threadblock::OutputTileShape<64, // column
8, // row
1, // group
1, // cluster
1 // iterations
>;
using Iterations =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
4, // row
1, // group
1, // cluster
1 // iterations
>;
using Delta =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
2, // row
1, // group
1, // cluster
1 // iterations
>;
using Count =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
8, // row
1, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap, Shape, Iterations, Delta, Count>;
#endif
using PredicatedTileIterator =
cutlass::epilogue::threadblock::PredicatedTileIterator<ThreadMap,
Element>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{64, 64};
cutlass::MatrixCoord output_extent{62, 56};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(
host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(kThreads, 1);
test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator>
<<<grid, block>>>(iterator_params, host_tensor.device_ref(),
output_extent);
cudaError_t result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("tensor_op_64x64x32_64x64x8.csv");
output << host_tensor.host_view();
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, tensor_op_128x64x32_64x64x8) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess =
128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 64;
#if 1
using ThreadMap =
cutlass::epilogue::threadblock::OutputTileOptimalThreadMap<
cutlass::epilogue::threadblock::OutputTileShape<128, 8, 2,
1, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 8, 2, 1,
8>,
kThreads, kElementsPerAccess,
cutlass::sizeof_bits<Element>::value>;
#else
using InternalThreadMap =
cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<64, 128>, kThreads,
kElementsPerAccess>;
using Shape = cutlass::epilogue::threadblock::OutputTileShape<64, // column
8, // row
2, // group
1, // cluster
8 // tile
>;
using Iterations =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
2, // row
2, // group
1, // cluster
1 // iterations
>;
using Delta = cutlass::epilogue::threadblock::OutputTileShape<1, // column
4, // row
64, // group
1, // cluster
1 // tile
>;
using Count =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
8, // row
1, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap, Shape, Iterations, Delta, Count>;
#endif
using PredicatedTileIterator =
cutlass::epilogue::threadblock::PredicatedTileIterator<ThreadMap,
Element>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{128, 64};
cutlass::MatrixCoord output_extent{125, 56};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(
host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(kThreads, 1);
test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator>
<<<grid, block>>>(iterator_params, host_tensor.device_ref(),
output_extent);
cudaError_t result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("tensor_op_128x64x32_64x64x8.csv");
output << host_tensor.host_view();
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, tensor_op_128x256x32_64x64x8) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess =
128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 256;
#if 1
using ThreadMap =
cutlass::epilogue::threadblock::OutputTileOptimalThreadMap<
cutlass::epilogue::threadblock::OutputTileShape<256, 8, 2,
1, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 8, 2, 1,
8>,
kThreads, kElementsPerAccess,
cutlass::sizeof_bits<Element>::value>;
#else
using InternalThreadMap =
cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<256, 128>, kThreads,
kElementsPerAccess>;
using Shape =
cutlass::epilogue::threadblock::OutputTileShape<256, // column
8, // row
2, // group
1, // cluster
8 // tile
>;
using Iterations =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
2, // row
2, // group
1, // cluster
1 // iterations
>;
using Delta = cutlass::epilogue::threadblock::OutputTileShape<1, // column
4, // row
64, // group
1, // cluster
1 // tile
>;
using Count =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
8, // row
1, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap, Shape, Iterations, Delta, Count>;
#endif
using PredicatedTileIterator =
cutlass::epilogue::threadblock::PredicatedTileIterator<ThreadMap,
Element>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{128, 256};
cutlass::MatrixCoord output_extent{123, 252};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(
host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(kThreads, 1);
test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator>
<<<grid, block>>>(iterator_params, host_tensor.device_ref(),
output_extent);
cudaError_t result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("tensor_op_128x256x32_64x64x8.csv");
output << host_tensor.host_view();
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, volta_tensor_op_64x64x32_64x64x4) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess =
128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 32;
#if 1
using ThreadMap =
cutlass::epilogue::threadblock::OutputTileOptimalThreadMap<
cutlass::epilogue::threadblock::OutputTileShape<64, 2, 4, 1,
1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1,
8>,
kThreads, kElementsPerAccess,
cutlass::sizeof_bits<Element>::value>;
#else
using InternalThreadMap =
cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<64, 8>, kThreads,
kElementsPerAccess>;
using Shape =
cutlass::epilogue::threadblock::OutputTileShape<64, // column
2, // row
4, // group
1, // cluster
8 // iterations
>;
using Iterations =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
1, // row
4, // group
1, // cluster
1 // iterations
>;
using Delta =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
1, // row
8, // group
1, // cluster
1 // iterations
>;
using Count =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
4, // row
2, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap, Shape, Iterations, Delta, Count>;
#endif
using PredicatedTileIterator =
cutlass::epilogue::threadblock::PredicatedTileIterator<ThreadMap,
Element>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{64, 64};
cutlass::MatrixCoord output_extent{62, 56};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(
host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(kThreads, 1);
test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator>
<<<grid, block>>>(iterator_params, host_tensor.device_ref(),
output_extent);
cudaError_t result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("volta_tensor_op_64x64x32_64x64x4.csv");
output << host_tensor.host_view();
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, volta_tensor_op_64x128x32_32x64x4) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess =
128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 128;
#if 1
using ThreadMap =
cutlass::epilogue::threadblock::OutputTileOptimalThreadMap<
cutlass::epilogue::threadblock::OutputTileShape<128, 2, 4,
1, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1,
8>,
kThreads, kElementsPerAccess,
cutlass::sizeof_bits<Element>::value>;
#else
using InternalThreadMap =
cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<128, 8>, kThreads,
kElementsPerAccess>;
using Shape =
cutlass::epilogue::threadblock::OutputTileShape<128, // column
2, // row
2, // group
2, // cluster
8 // iterations
>;
using Iterations =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
1, // row
1, // group
2, // cluster
1 // iterations
>;
using Delta =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
1, // row
8, // group
32, // cluster
1 // iterations
>;
using Count =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
4, // row
4, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap, Shape, Iterations, Delta, Count>;
#endif
using PredicatedTileIterator =
cutlass::epilogue::threadblock::PredicatedTileIterator<ThreadMap,
Element>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{64, 128};
cutlass::MatrixCoord output_extent{57, 124};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(
host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(kThreads, 1);
test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator>
<<<grid, block>>>(iterator_params, host_tensor.device_ref(),
output_extent);
cudaError_t result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("volta_tensor_op_64x128x32_32x64x4.csv");
output << host_tensor.host_view();
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, volta_tensor_op_128x256x32_64x64x4) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess =
128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 256;
#if 1
using ThreadMap =
cutlass::epilogue::threadblock::OutputTileOptimalThreadMap<
cutlass::epilogue::threadblock::OutputTileShape<256, 2, 4,
2, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1,
8>,
kThreads, kElementsPerAccess,
cutlass::sizeof_bits<Element>::value>;
#else
using InternalThreadMap =
cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<256, 16>, kThreads,
kElementsPerAccess>;
using Shape =
cutlass::epilogue::threadblock::OutputTileShape<256, // column
2, // row
4, // group
2, // cluster
8 // iterations
>;
using Iterations =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
1, // row
2, // group
2, // cluster
1 // iterations
>;
using Delta =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
1, // row
16, // group
64, // cluster
1 // iterations
>;
using Count =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
4, // row
2, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap, Shape, Iterations, Delta, Count>;
#endif
using PredicatedTileIterator =
cutlass::epilogue::threadblock::PredicatedTileIterator<ThreadMap,
Element>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{128, 256};
cutlass::MatrixCoord output_extent{128, 256};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(
host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(kThreads, 1);
test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator>
<<<grid, block>>>(iterator_params, host_tensor.device_ref(),
output_extent);
cudaError_t result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed || true) {
std::ofstream output("volta_tensor_op_128x256x32_64x64x4.csv");
output << host_tensor.host_view();
}
}
TEST(PredicatedTileIterator, volta_tensor_op_256x128x32_64x64x4) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess =
128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 256;
using ThreadMap =
cutlass::epilogue::threadblock::OutputTileOptimalThreadMap<
cutlass::epilogue::threadblock::OutputTileShape<128, 2, 4,
4, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1,
8>,
kThreads, kElementsPerAccess,
cutlass::sizeof_bits<Element>::value>;
using PredicatedTileIterator =
cutlass::epilogue::threadblock::PredicatedTileIterator<ThreadMap,
Element>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{256, 128};
cutlass::MatrixCoord output_extent{256, 128};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(
host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(kThreads, 1);
test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator>
<<<grid, block>>>(iterator_params, host_tensor.device_ref(),
output_extent);
cudaError_t result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed || true) {
std::ofstream output("volta_tensor_op_256x128x32_64x64x4.csv");
output << host_tensor.host_view();
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, simt_32x64x8_32x64x1) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess =
32 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 32;
#if 1
using ThreadMap =
cutlass::epilogue::threadblock::OutputTileOptimalThreadMap<
cutlass::epilogue::threadblock::OutputTileShape<64, 1, 4, 1,
1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1,
8>,
kThreads, kElementsPerAccess,
cutlass::sizeof_bits<Element>::value>;
#else
using InternalThreadMap =
cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<64, 4>, kThreads,
kElementsPerAccess>;
using Shape =
cutlass::epilogue::threadblock::OutputTileShape<64, // column
1, // row
4, // group
1, // cluster
1 // iterations
>;
using Iterations =
cutlass::epilogue::threadblock::OutputTileShape<2, // column
1, // row
4, // group
1, // cluster
1 // iterations
>;
using Delta =
cutlass::epilogue::threadblock::OutputTileShape<32, // column
1, // row
4, // group
16, // cluster
1 // iterations
>;
using Count =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
4, // row
2, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap, Shape, Iterations, Delta, Count>;
#endif
using PredicatedTileIterator =
cutlass::epilogue::threadblock::PredicatedTileIterator<ThreadMap,
Element>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{32, 64};
cutlass::MatrixCoord output_extent{27, 63};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(
host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(kThreads, 1);
test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator>
<<<grid, block>>>(iterator_params, host_tensor.device_ref(),
output_extent);
cudaError_t result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("simt_32x64x8_32x64x1.csv");
output << host_tensor.host_view();
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, simt_128x128x8_32x64x1) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess =
32 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 256;
#if 1
using ThreadMap =
cutlass::epilogue::threadblock::OutputTileOptimalThreadMap<
cutlass::epilogue::threadblock::OutputTileShape<128, 1, 4,
4, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1,
8>,
kThreads, kElementsPerAccess,
cutlass::sizeof_bits<Element>::value>;
#else
using InternalThreadMap =
cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<128, 16>, kThreads,
kElementsPerAccess>;
using Shape =
cutlass::epilogue::threadblock::OutputTileShape<128, // column
1, // row
4, // group
4, // cluster
1 // iterations
>;
using Iterations =
cutlass::epilogue::threadblock::OutputTileShape<2, // column
1, // row
2, // group
4, // cluster
1 // iterations
>;
using Delta =
cutlass::epilogue::threadblock::OutputTileShape<32, // column
1, // row
8, // group
32, // cluster
1 // iterations
>;
using Count =
cutlass::epilogue::threadblock::OutputTileShape<1, // column
4, // row
2, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap, Shape, Iterations, Delta, Count>;
#endif
using PredicatedTileIterator =
cutlass::epilogue::threadblock::PredicatedTileIterator<ThreadMap,
Element>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{128, 128};
cutlass::MatrixCoord output_extent{123, 121};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(
host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(kThreads, 1);
test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator>
<<<grid, block>>>(iterator_params, host_tensor.device_ref(),
output_extent);
cudaError_t result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("simt_128x128x8_32x64x1.csv");
output << host_tensor.host_view();
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
f4cb2a3f19dde56b7006c55b8d852d91e1f25e6f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <vector>
#include <iomanip>
#include <sstream>
#include <string>
#include <fstream>
#include <thread>
#include <ctime>
#include <stdio.h>
__device__ static inline void setSeed(int64_t *seed)
{
*seed = (*seed ^ 0x5deece66d) & ((1LL << 48) - 1);
}
__device__ static inline int next(int64_t *seed, const int bits)
{
*seed = (*seed * 0x5deece66d + 0xb) & ((1LL << 48) - 1);
return (int) (*seed >> (48 - bits));
}
__device__ static inline int nextInt(int64_t *seed, const int n)
{
int bits, val;
const int m = n - 1;
if((m & n) == 0) return (int) ((n * (int64_t)next(seed, 31)) >> 31);
do {
bits = next(seed, 31);
val = bits % n;
}
while (bits - val + m < 0);
return val;
}
struct Pos
{
int x, z;
};
__device__ class BoundingBox {
public:
Pos start;
Pos end;
__device__ static BoundingBox getBoundingBox(int minx, int miny, int minz, int maxx, int maxy, int maxz) {
BoundingBox box;
box.start.x = minx;
box.start.z = minz;
box.end.x = maxx;
box.end.z = maxz;
return box;
}
__device__ bool intersectsWith(BoundingBox box)
{
return this->end.x >= box.start.x && this->start.x <= box.end.x && this->end.z >= box.start.z && this->start.z <= box.end.z;
}
};
#define BLOCK_SIZE (128)
#define WORK_SIZE_BITS 16
#define SEEDS_PER_CALL ((1ULL << (WORK_SIZE_BITS)) * (BLOCK_SIZE))
#define GPU_ASSERT(code) gpuAssert((code), __FILE__, __LINE__)
inline void gpuAssert(hipError_t code, const char *file, int line) {
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s (code %d) %s %d\n", hipGetErrorString(code), code, file, line);
exit(code);
}
}
/*
Originally 64-bit seed value.
Mod 48 bit to get the 48 bit value.
Time could be any 64-bit value that when mod 48 gives the structure seed value.
We have the 48 bit post-mod 48 value
((8682522807148012UL * 181783497276652981UL)^x)%(1LL << 48) = someSeed
Take 48 bit seed value
Loop upper bits
Xor (8682522807148012UL * 181783497276652981UL) with upperBits Seed
Find seed that matches
*/
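/*
A rough sketch of the recovery step described above, using only constants that
already appear in this file (illustrative only):

    mask48    = (1LL << 48) - 1 = 0xFFFFFFFFFFFF
    seedGuess = hardcoded ^ timeGuess            // what threadWork computes

Since XOR is its own inverse, timeGuess = hardcoded ^ seedGuess, so iterating
candidate times and XOR-ing each with `hardcoded` enumerates candidate seeds.
setSeed() then applies the scramble (seed ^ 0x5deece66d) & mask48, and the
spawn walk below is replayed against guessBox to test each candidate.
*/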
__device__ BoundingBox guessBox;
__device__ int64_t hardcoded = 8682522807148012L * 181783497276652981L;
typedef unsigned long long int uint64_cu;
__device__ static bool match(int64_t seed){
    BoundingBox spawnBox;
    Pos spawn;
    spawn.x = 0;
    spawn.z = 0;
    // Seed the box at the origin so the loop condition below does not read
    // uninitialized memory on its first evaluation.
    spawnBox.start = spawn;
    spawnBox.end = spawn;
    int count = 0;
int64_t structureSeed = seed;
setSeed(&structureSeed);
nextInt(&structureSeed, 12000);
for(spawn.z = 0; (!spawnBox.intersectsWith(guessBox) && count <= 150) && !(spawn.z >= guessBox.end.z || spawn.x >= guessBox.end.x); spawn.z += nextInt(&structureSeed, 64) - nextInt(&structureSeed, 64))
{
spawn.x += nextInt(&structureSeed, 64) - nextInt(&structureSeed, 64);
spawnBox.start = spawn;
spawnBox.end = spawn;
count++;
}
if(spawnBox.intersectsWith(guessBox)){
return true;
}
return false;
}
__global__ __launch_bounds__(BLOCK_SIZE,2) static void threadWork(uint64_t offset, uint64_cu* underCounter, uint64_cu* overCounter, int64_t* buffer){
int64_t timeGuess = (blockIdx.x * blockDim.x + threadIdx.x) + offset;
uint64_t seedIndex = (blockIdx.x * blockDim.x + threadIdx.x);
int64_t seedGuess = hardcoded ^ timeGuess;
int64_t structureSeed = seedGuess;
    BoundingBox spawnBox;
    Pos spawn;
    spawn.x = 0;
    spawn.z = 0;
    // Seed the box at the origin so the loop condition below does not read
    // uninitialized memory on its first evaluation.
    spawnBox.start = spawn;
    spawnBox.end = spawn;
    int count = 0;
setSeed(&structureSeed);
nextInt(&structureSeed, 12000);
for(spawn.z = 0; (!spawnBox.intersectsWith(guessBox) && count <= 150); spawn.z += nextInt(&structureSeed, 64) - nextInt(&structureSeed, 64))
{
spawn.x += nextInt(&structureSeed, 64) - nextInt(&structureSeed, 64);
spawnBox.start = spawn;
spawnBox.end = spawn;
count++;
}
if(spawn.z > guessBox.end.z || spawn.x > guessBox.end.x){
atomicAdd(underCounter, 1);
return;
}
if(spawn.z < guessBox.start.z || spawn.x < guessBox.start.x){
atomicAdd(overCounter, 1);
return;
}
}
__global__ __launch_bounds__(1,1) static void setupGuessBox(Pos guessMin, Pos guessMax){
guessBox.start = guessMin;
guessBox.end = guessMax;
}
int64_t* buffer;
uint32_t* counter;
uint64_cu* underCounter;
uint64_cu* overCounter;
int main(int argc, char **argv ){
int64_t startValue = 1282521600000;
int64_t total = 1282780799000;
time_t start = time(NULL);
FILE* fp = fopen("seananners-middlestep.txt", "w+");
double seconds_per_structure_seed = 0.0;
int thread = 0;
int curr = 0;
uint64_t amount = total - startValue;
int tmpCount = 0;
GPU_ASSERT(hipMallocManaged(&buffer, sizeof(int64_t) * SEEDS_PER_CALL));
GPU_ASSERT(hipPeekAtLastError());
GPU_ASSERT(hipDeviceSynchronize());
    // The counters are uint64_cu (unsigned long long), so allocate 8 bytes each.
    GPU_ASSERT(hipMallocManaged(&overCounter, sizeof(uint64_cu)));
    GPU_ASSERT(hipPeekAtLastError());
    GPU_ASSERT(hipDeviceSynchronize());
    GPU_ASSERT(hipMallocManaged(&underCounter, sizeof(uint64_cu)));
    GPU_ASSERT(hipPeekAtLastError());
    GPU_ASSERT(hipDeviceSynchronize());
    // Managed allocations are not guaranteed to be zero-initialized.
    *overCounter = 0;
    *underCounter = 0;
Pos guessMin;
Pos guessMax;
guessMin.x = 1710;
guessMin.z = 276;
guessMax.x = 1734;
guessMax.z = 348;
hipLaunchKernelGGL(( setupGuessBox), dim3(1),dim3(1), 0, 0, guessMin, guessMax);
hipSetDevice(0);
GPU_ASSERT(hipPeekAtLastError());
GPU_ASSERT(hipDeviceSynchronize());
//*counter = 0;
uint64_t countOut = 0;
uint64_t tempCount;
for(int64_t offset = 0; offset < amount; offset += SEEDS_PER_CALL){
int64_t value = startValue + offset;
value *= 1000;
    hipLaunchKernelGGL(( threadWork), dim3(1ULL<<WORK_SIZE_BITS),dim3(BLOCK_SIZE), 0, 0, value, underCounter, overCounter, buffer);
GPU_ASSERT(hipPeekAtLastError());
GPU_ASSERT(hipDeviceSynchronize());
/*for(int i = 0; i < *counter; i++){
int64_t timeGuess = buffer[i];
if(timeGuess == -1){
*underCounter++;
}
if(timeGuess == 1){
*overCounter++;
}
}*/
if(countOut >= 1000000000){
time_t tempTime = time(NULL);
uint64_t tempDiff = tempTime - start;
double sps = (double)offset/(double)tempDiff;
double percent = ((double)offset/(double)amount) * 100.0;
printf("Seeds Per Second: %f\tProgress: %f\n", sps, percent);
countOut = 0;
}
//*counter = 0;
countOut += SEEDS_PER_CALL;
}
time_t end = time(NULL);
uint64_t diff = end - start;
double seedsPerSec = (double)total/(double)diff;
uint64_t tot = total - startValue;
printf("Time taken: %lld\nSeeds per second: %15.9f\nUnderCounter: %lld\nOverCounter: %lld\nTotal: %lld", diff, seedsPerSec, *underCounter, *overCounter, tot);
fclose(fp);
return 0;
}
|
f4cb2a3f19dde56b7006c55b8d852d91e1f25e6f.cu
|
#include <iostream>
#include <math.h>
#include <vector>
#include <iomanip>
#include <sstream>
#include <string>
#include <fstream>
#include <thread>
#include <ctime>
#include <stdio.h>
__device__ static inline void setSeed(int64_t *seed)
{
*seed = (*seed ^ 0x5deece66d) & ((1LL << 48) - 1);
}
__device__ static inline int next(int64_t *seed, const int bits)
{
*seed = (*seed * 0x5deece66d + 0xb) & ((1LL << 48) - 1);
return (int) (*seed >> (48 - bits));
}
__device__ static inline int nextInt(int64_t *seed, const int n)
{
int bits, val;
const int m = n - 1;
if((m & n) == 0) return (int) ((n * (int64_t)next(seed, 31)) >> 31);
do {
bits = next(seed, 31);
val = bits % n;
}
while (bits - val + m < 0);
return val;
}
struct Pos
{
int x, z;
};
__device__ class BoundingBox {
public:
Pos start;
Pos end;
__device__ static BoundingBox getBoundingBox(int minx, int miny, int minz, int maxx, int maxy, int maxz) {
BoundingBox box;
box.start.x = minx;
box.start.z = minz;
box.end.x = maxx;
box.end.z = maxz;
return box;
}
__device__ bool intersectsWith(BoundingBox box)
{
return this->end.x >= box.start.x && this->start.x <= box.end.x && this->end.z >= box.start.z && this->start.z <= box.end.z;
}
};
#define BLOCK_SIZE (128)
#define WORK_SIZE_BITS 16
#define SEEDS_PER_CALL ((1ULL << (WORK_SIZE_BITS)) * (BLOCK_SIZE))
#define GPU_ASSERT(code) gpuAssert((code), __FILE__, __LINE__)
inline void gpuAssert(cudaError_t code, const char *file, int line) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s (code %d) %s %d\n", cudaGetErrorString(code), code, file, line);
exit(code);
}
}
/*
Originally 64-bit seed value.
Mod 48 bit to get the 48 bit value.
Time could be any 64-bit value that when mod 48 gives the structure seed value.
We have the 48 bit post-mod 48 value
((8682522807148012UL * 181783497276652981UL)^x)%(1LL << 48) = someSeed
Take 48 bit seed value
Loop upper bits
Xor (8682522807148012UL * 181783497276652981UL) with upperBits Seed
Find seed that matches
*/
__device__ BoundingBox guessBox;
__device__ int64_t hardcoded = 8682522807148012L * 181783497276652981L;
typedef unsigned long long int uint64_cu;
__device__ static bool match(int64_t seed){
    BoundingBox spawnBox;
    Pos spawn;
    spawn.x = 0;
    spawn.z = 0;
    // Seed the box at the origin so the loop condition below does not read
    // uninitialized memory on its first evaluation.
    spawnBox.start = spawn;
    spawnBox.end = spawn;
    int count = 0;
int64_t structureSeed = seed;
setSeed(&structureSeed);
nextInt(&structureSeed, 12000);
for(spawn.z = 0; (!spawnBox.intersectsWith(guessBox) && count <= 150) && !(spawn.z >= guessBox.end.z || spawn.x >= guessBox.end.x); spawn.z += nextInt(&structureSeed, 64) - nextInt(&structureSeed, 64))
{
spawn.x += nextInt(&structureSeed, 64) - nextInt(&structureSeed, 64);
spawnBox.start = spawn;
spawnBox.end = spawn;
count++;
}
if(spawnBox.intersectsWith(guessBox)){
return true;
}
return false;
}
__global__ __launch_bounds__(BLOCK_SIZE,2) static void threadWork(uint64_t offset, uint64_cu* underCounter, uint64_cu* overCounter, int64_t* buffer){
int64_t timeGuess = (blockIdx.x * blockDim.x + threadIdx.x) + offset;
uint64_t seedIndex = (blockIdx.x * blockDim.x + threadIdx.x);
int64_t seedGuess = hardcoded ^ timeGuess;
int64_t structureSeed = seedGuess;
    BoundingBox spawnBox;
    Pos spawn;
    spawn.x = 0;
    spawn.z = 0;
    // Seed the box at the origin so the loop condition below does not read
    // uninitialized memory on its first evaluation.
    spawnBox.start = spawn;
    spawnBox.end = spawn;
    int count = 0;
setSeed(&structureSeed);
nextInt(&structureSeed, 12000);
for(spawn.z = 0; (!spawnBox.intersectsWith(guessBox) && count <= 150); spawn.z += nextInt(&structureSeed, 64) - nextInt(&structureSeed, 64))
{
spawn.x += nextInt(&structureSeed, 64) - nextInt(&structureSeed, 64);
spawnBox.start = spawn;
spawnBox.end = spawn;
count++;
}
if(spawn.z > guessBox.end.z || spawn.x > guessBox.end.x){
atomicAdd(underCounter, 1);
return;
}
if(spawn.z < guessBox.start.z || spawn.x < guessBox.start.x){
atomicAdd(overCounter, 1);
return;
}
}
__global__ __launch_bounds__(1,1) static void setupGuessBox(Pos guessMin, Pos guessMax){
guessBox.start = guessMin;
guessBox.end = guessMax;
}
int64_t* buffer;
uint32_t* counter;
uint64_cu* underCounter;
uint64_cu* overCounter;
int main(int argc, char **argv ){
int64_t startValue = 1282521600000;
int64_t total = 1282780799000;
time_t start = time(NULL);
FILE* fp = fopen("seananners-middlestep.txt", "w+");
double seconds_per_structure_seed = 0.0;
int thread = 0;
int curr = 0;
uint64_t amount = total - startValue;
int tmpCount = 0;
GPU_ASSERT(cudaMallocManaged(&buffer, sizeof(int64_t) * SEEDS_PER_CALL));
GPU_ASSERT(cudaPeekAtLastError());
GPU_ASSERT(cudaDeviceSynchronize());
    // The counters are uint64_cu (unsigned long long), so allocate 8 bytes each.
    GPU_ASSERT(cudaMallocManaged(&overCounter, sizeof(uint64_cu)));
    GPU_ASSERT(cudaPeekAtLastError());
    GPU_ASSERT(cudaDeviceSynchronize());
    GPU_ASSERT(cudaMallocManaged(&underCounter, sizeof(uint64_cu)));
    GPU_ASSERT(cudaPeekAtLastError());
    GPU_ASSERT(cudaDeviceSynchronize());
    // Managed allocations are not guaranteed to be zero-initialized.
    *overCounter = 0;
    *underCounter = 0;
Pos guessMin;
Pos guessMax;
guessMin.x = 1710;
guessMin.z = 276;
guessMax.x = 1734;
guessMax.z = 348;
setupGuessBox<<<1,1>>>(guessMin, guessMax);
cudaSetDevice(0);
GPU_ASSERT(cudaPeekAtLastError());
GPU_ASSERT(cudaDeviceSynchronize());
//*counter = 0;
uint64_t countOut = 0;
uint64_t tempCount;
for(int64_t offset = 0; offset < amount; offset += SEEDS_PER_CALL){
int64_t value = startValue + offset;
value *= 1000;
threadWork<<<1ULL<<WORK_SIZE_BITS,BLOCK_SIZE>>>(value, underCounter, overCounter, buffer);
GPU_ASSERT(cudaPeekAtLastError());
GPU_ASSERT(cudaDeviceSynchronize());
/*for(int i = 0; i < *counter; i++){
int64_t timeGuess = buffer[i];
if(timeGuess == -1){
*underCounter++;
}
if(timeGuess == 1){
*overCounter++;
}
}*/
if(countOut >= 1000000000){
time_t tempTime = time(NULL);
uint64_t tempDiff = tempTime - start;
double sps = (double)offset/(double)tempDiff;
double percent = ((double)offset/(double)amount) * 100.0;
printf("Seeds Per Second: %f\tProgress: %f\n", sps, percent);
countOut = 0;
}
//*counter = 0;
countOut += SEEDS_PER_CALL;
}
time_t end = time(NULL);
uint64_t diff = end - start;
double seedsPerSec = (double)total/(double)diff;
uint64_t tot = total - startValue;
	printf("Time taken: %llu\nSeeds per second: %15.9f\nUnderCounter: %llu\nOverCounter: %llu\nTotal: %llu\n", (unsigned long long)diff, seedsPerSec, *underCounter, *overCounter, (unsigned long long)tot);
fclose(fp);
return 0;
}
|
c7b98d9aaae3539b3ef5eb4604387d6da8be9e84.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019 Trail of Bits, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "output.h"
#include <stdint.h>
#include <stdbool.h>
#include <inttypes.h>
#include "gpu_common.h"
#ifndef __FILE_NAME__
#define __FILE_NAME__ "GPU_COMMON"
#endif
device_info_t CUDA_DEVICES[MAX_CUDA_DEVICES] = {0};
int avilable_gpus = 0;
extern "C" {
int GPU_LIMIT = 0;
}
// figure out how many GPUs we have
// and how many threads/blocks to use for
// each GPU
extern "C" void gpu_get_device_info(void) {
int devices = 0;
hipGetDeviceCount(&devices);
if(devices > MAX_CUDA_DEVICES) {
log_output(__FILE_NAME__, "Found more than %d devices, only using %d\n", MAX_CUDA_DEVICES, MAX_CUDA_DEVICES);
devices = MAX_CUDA_DEVICES;
} else {
log_output(__FILE_NAME__, "Found %d devices\n", devices);
}
if(GPU_LIMIT > 0 && devices > GPU_LIMIT) {
log_output(__FILE_NAME__, "Artificially limiting to %d of %d GPUs\n", GPU_LIMIT, devices);
avilable_gpus = GPU_LIMIT;
} else{
avilable_gpus = devices;
}
for (int i = 0; i < avilable_gpus; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
log_output(__FILE_NAME__, "GPU[%d]: Device Number: %d\n", i, i);
log_output(__FILE_NAME__, "GPU[%d]: Device name: %s\n", i, prop.name);
log_output(__FILE_NAME__, "GPU[%d]: Clock Rate (KHz): %d\n", i,
prop.clockRate);
hipSetDevice(i);
int blocks;
int threads;
// find a decent threads/block combo
if(hipSuccess != hipOccupancyMaxPotentialBlockSize(&blocks, &threads, findit, 0, 0)) {
log_output(__FILE_NAME__, "GPU[%d]: Could not find max occupancy for GPU\n", i);
exit(-1);
}
log_output(__FILE_NAME__, "GPU[%d]: Blocks: %d\n", i, blocks);
log_output(__FILE_NAME__, "GPU[%d]: Threads: %d\n", i, threads);
CUDA_DEVICES[i].blocks = blocks;
CUDA_DEVICES[i].threads = threads;
CUDA_DEVICES[i].clockrate = prop.clockRate;
CUDA_DEVICES[i].id = i;
CUDA_DEVICES[i].finished = false;
}
}
// how many "work pieces" is the input space
// being divided into?
extern "C" int gpu_get_workers(void) {
int total_size = 0;
for(int i = 0; i < avilable_gpus; i++) {
int my_blocks = CUDA_DEVICES[i].blocks;
int my_threads = CUDA_DEVICES[i].threads;
int my_alloc = my_blocks * my_threads;
total_size += my_alloc;
}
return total_size;
}
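/*
 Illustrative usage sketch, not called anywhere in this project: the intended
 order of the two helpers above. The function name example_gpu_setup is an
 assumption added for clarity.
*/
static void example_gpu_setup(void) {
	gpu_get_device_info();          // enumerate GPUs and pick a blocks/threads combo per device
	int pieces = gpu_get_workers(); // total resident threads across the selected GPUs
	(void)pieces;                   // a real caller would split its input space into this many chunks
}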
|
c7b98d9aaae3539b3ef5eb4604387d6da8be9e84.cu
|
/*
* Copyright (c) 2019 Trail of Bits, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "output.h"
#include <stdint.h>
#include <stdbool.h>
#include <inttypes.h>
#include "gpu_common.h"
#ifndef __FILE_NAME__
#define __FILE_NAME__ "GPU_COMMON"
#endif
device_info_t CUDA_DEVICES[MAX_CUDA_DEVICES] = {0};
int avilable_gpus = 0;
extern "C" {
int GPU_LIMIT = 0;
}
// figure out how many GPUs we have
// and how many threads/blocks to use for
// each GPU
extern "C" void gpu_get_device_info(void) {
int devices = 0;
cudaGetDeviceCount(&devices);
if(devices > MAX_CUDA_DEVICES) {
log_output(__FILE_NAME__, "Found more than %d devices, only using %d\n", MAX_CUDA_DEVICES, MAX_CUDA_DEVICES);
devices = MAX_CUDA_DEVICES;
} else {
log_output(__FILE_NAME__, "Found %d devices\n", devices);
}
if(GPU_LIMIT > 0 && devices > GPU_LIMIT) {
log_output(__FILE_NAME__, "Artificially limiting to %d of %d GPUs\n", GPU_LIMIT, devices);
avilable_gpus = GPU_LIMIT;
} else{
avilable_gpus = devices;
}
for (int i = 0; i < avilable_gpus; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
log_output(__FILE_NAME__, "GPU[%d]: Device Number: %d\n", i, i);
log_output(__FILE_NAME__, "GPU[%d]: Device name: %s\n", i, prop.name);
log_output(__FILE_NAME__, "GPU[%d]: Clock Rate (KHz): %d\n", i,
prop.clockRate);
cudaSetDevice(i);
int blocks;
int threads;
// find a decent threads/block combo
if(cudaSuccess != cudaOccupancyMaxPotentialBlockSize(&blocks, &threads, findit, 0, 0)) {
log_output(__FILE_NAME__, "GPU[%d]: Could not find max occupancy for GPU\n", i);
exit(-1);
}
log_output(__FILE_NAME__, "GPU[%d]: Blocks: %d\n", i, blocks);
log_output(__FILE_NAME__, "GPU[%d]: Threads: %d\n", i, threads);
CUDA_DEVICES[i].blocks = blocks;
CUDA_DEVICES[i].threads = threads;
CUDA_DEVICES[i].clockrate = prop.clockRate;
CUDA_DEVICES[i].id = i;
CUDA_DEVICES[i].finished = false;
}
}
// how many "work pieces" is the input space
// being divided into?
extern "C" int gpu_get_workers(void) {
int total_size = 0;
for(int i = 0; i < avilable_gpus; i++) {
int my_blocks = CUDA_DEVICES[i].blocks;
int my_threads = CUDA_DEVICES[i].threads;
int my_alloc = my_blocks * my_threads;
total_size += my_alloc;
}
return total_size;
}
|
c180a5bfb4f8b1c6c9a9674b30948ba78e7ce67c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "based.h"
__global__ void d_normlize(hipComplex* d_in,const int len,hipComplex* v,hipComplex* a){
int tidx = threadIdx.x;
const int bidx = blockIdx.x;
int tid = blockIdx.x*blockDim.x+threadIdx.x;
int t_n = blockDim.x;
__shared__ float temp_real[512];
__shared__ float temp_image[512];
float t_real = 0.0;
float t_image = 0.0;
while(tidx < len){
t_real += d_in[tidx+bidx*len].x * d_in[tidx+bidx*len].x;
t_image += d_in[tidx+bidx*len].y * d_in[tidx+bidx*len].y;
tidx += t_n;
}
tidx = threadIdx.x;
temp_real[tidx] = t_real;
temp_image[tidx] = t_image;
__syncthreads();
int i = 512/2;
	while(i != 0){
		if(tidx < i){
			temp_real[tidx] += temp_real[tidx+i];
			temp_image[tidx] += temp_image[tidx+i];
		}
		__syncthreads(); // sync each reduction step so partial sums are visible block-wide before the stride halves
		i /= 2;
	}
if(tidx == 0){
temp_real[0]+=temp_image[0];
}
__syncthreads();
if(temp_real[0] < 1.e-100){
if(tidx == 0)
{
temp_real[0]= 2*len;
}
__syncthreads();
while(tidx < len){
d_in[ tidx + bidx*len ].x = 1;
d_in[ tidx + bidx*len ].y = 1;
tidx += t_n;
}
tidx = threadIdx.x;
__syncthreads();
}
if( tidx == 0){
a[bidx].x = sqrt(temp_real[0]);
a[bidx].y = 0;
}
__syncthreads();
while( tidx < len){
v[tidx + bidx*len].x = d_in[tidx + bidx*len].x/a[bidx].x;
v[tidx + bidx*len].y = d_in[tidx + bidx*len].y/a[bidx].x;
tidx += t_n;
}
}
void normlize(hipComplex* d_in,const int len,const int batch,hipComplex* d_v,hipComplex* d_a){
int threads = 512;
int blocks = batch;
hipLaunchKernelGGL(( d_normlize), dim3(blocks),dim3(threads), 0, 0, d_in,len,d_v,d_a);
}
|
c180a5bfb4f8b1c6c9a9674b30948ba78e7ce67c.cu
|
#include "based.h"
__global__ void d_normlize(cuComplex* d_in,const int len,cuComplex* v,cuComplex* a){
int tidx = threadIdx.x;
const int bidx = blockIdx.x;
int tid = blockIdx.x*blockDim.x+threadIdx.x;
int t_n = blockDim.x;
__shared__ float temp_real[512];
__shared__ float temp_image[512];
float t_real = 0.0;
float t_image = 0.0;
while(tidx < len){
t_real += d_in[tidx+bidx*len].x * d_in[tidx+bidx*len].x;
t_image += d_in[tidx+bidx*len].y * d_in[tidx+bidx*len].y;
tidx += t_n;
}
tidx = threadIdx.x;
temp_real[tidx] = t_real;
temp_image[tidx] = t_image;
__syncthreads();
int i = 512/2;
	while(i != 0){
		if(tidx < i){
			temp_real[tidx] += temp_real[tidx+i];
			temp_image[tidx] += temp_image[tidx+i];
		}
		__syncthreads(); // sync each reduction step so partial sums are visible block-wide before the stride halves
		i /= 2;
	}
if(tidx == 0){
temp_real[0]+=temp_image[0];
}
__syncthreads();
if(temp_real[0] < 1.e-100){
if(tidx == 0)
{
temp_real[0]= 2*len;
}
__syncthreads();
while(tidx < len){
d_in[ tidx + bidx*len ].x = 1;
d_in[ tidx + bidx*len ].y = 1;
tidx += t_n;
}
tidx = threadIdx.x;
__syncthreads();
}
if( tidx == 0){
a[bidx].x = sqrt(temp_real[0]);
a[bidx].y = 0;
}
__syncthreads();
while( tidx < len){
v[tidx + bidx*len].x = d_in[tidx + bidx*len].x/a[bidx].x;
v[tidx + bidx*len].y = d_in[tidx + bidx*len].y/a[bidx].x;
tidx += t_n;
}
}
void normlize(cuComplex* d_in,const int len,const int batch,cuComplex* d_v,cuComplex* d_a){
int threads = 512;
int blocks = batch;
d_normlize<<<blocks,threads>>>(d_in,len,d_v,d_a);
}
|
0c865b4fa7efd8680f362b06a1737baf5427a72b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void translate_3D(float* coords, size_t dim_z, size_t dim_y, size_t dim_x, float seg_z, float seg_y, float seg_x){
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
size_t total = dim_x * dim_y * dim_z;
	if(index < total){
		coords[index] += seg_z;
		coords[index + total] += seg_y;
		coords[index + total * 2] += seg_x;
	}
}
|
0c865b4fa7efd8680f362b06a1737baf5427a72b.cu
|
#include "includes.h"
__global__ void translate_3D(float* coords, size_t dim_z, size_t dim_y, size_t dim_x, float seg_z, float seg_y, float seg_x){
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
size_t total = dim_x * dim_y * dim_z;
	if(index < total){
		coords[index] += seg_z;
		coords[index + total] += seg_y;
		coords[index + total * 2] += seg_x;
	}
}
|
0e799bef05823d55598839de983638b0bc5670f3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <vector>
#include "caffe/layers/scale_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
__global__ void ScaleForward(const int n, const real_t* in,
const real_t* scale, const int scale_dim, const int inner_dim,
real_t* out) {
CUDA_KERNEL_LOOP(index, n) {
const int scale_index = (index / inner_dim) % scale_dim;
out[index] = in[index] * scale[scale_index];
}
}
__global__ void ScaleBiasForward(const int n, const real_t* in,
const real_t* scale, const real_t* bias,
const int scale_dim, const int inner_dim, real_t* out) {
CUDA_KERNEL_LOOP(index, n) {
const int scale_index = (index / inner_dim) % scale_dim;
out[index] = in[index] * scale[scale_index] + bias[scale_index];
}
}
void ScaleLayer::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
const int count = top[0]->count();
const real_t* bottom_data = bottom[0]->gpu_data();
if (bottom[0] == top[0]) {
// in-place computation; need to store bottom data before overwriting it.
// Note that this is only necessary for Backward; we could skip this if not
// doing Backward, but Caffe currently provides no way of knowing whether
// we'll need to do Backward at the time of the Forward call.
caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(),
temp_.mutable_gpu_data());
}
const real_t* scale_data =
((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data();
real_t* top_data = top[0]->mutable_gpu_data();
if (bias_layer_) {
const real_t* bias_data = this->blobs_[bias_param_id_]->gpu_data();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(ScaleBiasForward, dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, bottom_data, scale_data, bias_data, scale_dim_, inner_dim_,
        top_data);
  } else {
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(ScaleForward, dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, bottom_data, scale_data, scale_dim_, inner_dim_, top_data);
}
}
} // namespace caffe
|
0e799bef05823d55598839de983638b0bc5670f3.cu
|
#include <cfloat>
#include <vector>
#include "caffe/layers/scale_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
__global__ void ScaleForward(const int n, const real_t* in,
const real_t* scale, const int scale_dim, const int inner_dim,
real_t* out) {
CUDA_KERNEL_LOOP(index, n) {
const int scale_index = (index / inner_dim) % scale_dim;
out[index] = in[index] * scale[scale_index];
}
}
__global__ void ScaleBiasForward(const int n, const real_t* in,
const real_t* scale, const real_t* bias,
const int scale_dim, const int inner_dim, real_t* out) {
CUDA_KERNEL_LOOP(index, n) {
const int scale_index = (index / inner_dim) % scale_dim;
out[index] = in[index] * scale[scale_index] + bias[scale_index];
}
}
void ScaleLayer::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
const int count = top[0]->count();
const real_t* bottom_data = bottom[0]->gpu_data();
if (bottom[0] == top[0]) {
// in-place computation; need to store bottom data before overwriting it.
// Note that this is only necessary for Backward; we could skip this if not
// doing Backward, but Caffe currently provides no way of knowing whether
// we'll need to do Backward at the time of the Forward call.
caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(),
temp_.mutable_gpu_data());
}
const real_t* scale_data =
((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data();
real_t* top_data = top[0]->mutable_gpu_data();
if (bias_layer_) {
const real_t* bias_data = this->blobs_[bias_param_id_]->gpu_data();
ScaleBiasForward // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, scale_data, bias_data, scale_dim_, inner_dim_,
top_data);
} else {
ScaleForward // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, scale_data, scale_dim_, inner_dim_, top_data);
}
}
} // namespace caffe
|
7774095cdd3b3f3c48519ef1f6d4d4f02bc3e38f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define _GLIBCXX_USE_CXX11_ABI 0
#define NUM_ROWS 1934
#define NUM_COLS 3440
#define BLOCK_SIZE 256
#define FILTER_SIZE 5
#define TILE_SIZE 12
#define BLOCK_SIZE_2D (TILE_SIZE + FILTER_SIZE - 1)
#include <opencv2/opencv.hpp>
#include <sys/time.h>
using namespace std;
using namespace cv;
unsigned char *greyPreviousImage_d;
typedef struct {
struct timeval startTime;
struct timeval endTime;
} Timer;
void startTime(Timer* timer) {
gettimeofday(&(timer->startTime), NULL);
}
void stopTime(Timer* timer) {
gettimeofday(&(timer->endTime), NULL);
}
float elapsedTime(Timer timer) {
return ((float)((timer.endTime.tv_sec - timer.startTime.tv_sec) \
+ (timer.endTime.tv_usec - timer.startTime.tv_usec) / 1.0e6));
}
//
__global__ void bgrToGreyscale(const unsigned char* const bgrImage, unsigned char* greyImage){
const long pointIndex = threadIdx.x + blockDim.x*blockIdx.x;
if (pointIndex < NUM_ROWS * NUM_COLS) {
long bgrIndex = pointIndex * 3;
unsigned char greyPoint = .299f*bgrImage[bgrIndex + 2] + .587f*bgrImage[bgrIndex + 1] + .114f*bgrImage[bgrIndex];
greyImage[pointIndex] = greyPoint;
}
}
//
__global__ void bgrToGreyscaleAndSubtract(const unsigned char* const bgrImage, unsigned char* diffImage, unsigned char* previousGreyImage) {
const long pointIndex = threadIdx.x + blockDim.x*blockIdx.x;
if (pointIndex < NUM_ROWS * NUM_COLS) {
long bgrIndex = pointIndex * 3;
unsigned char greyPoint = .299f*bgrImage[bgrIndex + 2] + .587f*bgrImage[bgrIndex + 1] + .114f*bgrImage[bgrIndex];
unsigned char previousGreyPoint = previousGreyImage[pointIndex];
diffImage[pointIndex] = greyPoint < previousGreyPoint ? 0 : greyPoint - previousGreyPoint;
previousGreyImage[pointIndex] = greyPoint;
}
}
__global__ void bgrAlignedToGreyscaleAndSubtract(const unsigned char* const bImage, const unsigned char* const gImage, const unsigned char* const rImage,
unsigned char* diffImage, unsigned char* previousGreyImage) {
const long pointIndex = threadIdx.x + blockDim.x*blockIdx.x;
if (pointIndex < NUM_ROWS * NUM_COLS) {
unsigned char greyPoint = .299f*rImage[pointIndex] + .587f*gImage[pointIndex] + .114f*bImage[pointIndex];
unsigned char previousGreyPoint = previousGreyImage[pointIndex];
diffImage[pointIndex] = greyPoint < previousGreyPoint ? 0 : greyPoint - previousGreyPoint;
previousGreyImage[pointIndex] = greyPoint;
}
}
__global__ void cudaSubtract(const unsigned char* const newGreyImage, unsigned char* previousGreyImage, unsigned char* diffImage) {
const long pointIndex = threadIdx.x + blockDim.x*blockIdx.x;
if (pointIndex < NUM_ROWS * NUM_COLS) {
unsigned char greyPoint = newGreyImage[pointIndex];
unsigned char previousGreyPoint = previousGreyImage[pointIndex];
diffImage[pointIndex] = greyPoint < previousGreyPoint ? 0 : greyPoint - previousGreyPoint;
}
}
__global__ void cudaBlurAndThresh(const unsigned char* const in, unsigned char* out)
{
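	// Tiled 5x5 box blur followed by a binary threshold: each block stages a
	// (TILE_SIZE + FILTER_SIZE - 1)^2 tile (input plus halo) in shared memory,
	// then the inner TILE_SIZE x TILE_SIZE threads write 255 where the blurred
	// difference exceeds 30 and 0 elsewhere.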
__shared__ float Ns[BLOCK_SIZE_2D][BLOCK_SIZE_2D];
int row_o = blockIdx.y * TILE_SIZE + threadIdx.y;
int col_o = blockIdx.x * TILE_SIZE + threadIdx.x;
int row_i = row_o - FILTER_SIZE / 2;
int col_i = col_o - FILTER_SIZE / 2;
if (row_i >= 0 && col_i >= 0 && row_i < NUM_ROWS && col_i < NUM_COLS)
Ns[threadIdx.y][threadIdx.x] = in[row_i * NUM_COLS + col_i];
else
Ns[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
if (threadIdx.x < TILE_SIZE && threadIdx.y < TILE_SIZE &&
row_o < NUM_ROWS && col_o < NUM_COLS) {
float sum = 0;
for (int i = 0; i < FILTER_SIZE; i++) {
for (int j = 0; j < FILTER_SIZE; j++) {
sum += Ns[threadIdx.y + i][threadIdx.x + j] / 25.0;
}
}
int rounded = round(sum);
out[row_o*NUM_COLS + col_o] = rounded > 30 ? 255 : 0;
}
}
unsigned char* makeCudaGreyOnDevice(unsigned char* bgrImage_h) {
hipError_t cuda_ret;
unsigned char *greyImage_d, *bgrImage_d;
cuda_ret = hipMalloc((void**)&greyImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != hipSuccess)
cout << "Unable to copy device memory" << endl;
cuda_ret = hipMalloc((void**)&bgrImage_d, 3 * NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != hipSuccess)
cout << "Unable to copy device memory" << endl;
cuda_ret = hipMemcpy(bgrImage_d, bgrImage_h, 3 * NUM_ROWS * NUM_COLS * sizeof(unsigned char), hipMemcpyHostToDevice);
if (cuda_ret != hipSuccess)
cout << "Unable to copy device memory" << endl;
dim3 block_dim(BLOCK_SIZE, 1, 1);
const unsigned int blocks = NUM_COLS * NUM_ROWS / BLOCK_SIZE + ((NUM_COLS * NUM_ROWS) % BLOCK_SIZE ? 1 : 0);
dim3 grid_dim(blocks, 1, 1);
hipLaunchKernelGGL(( bgrToGreyscale) , dim3(grid_dim), dim3(block_dim), 0, 0, bgrImage_d, greyImage_d);
hipFree(bgrImage_d);
return greyImage_d;
}
unsigned char* makeCudaGrey(unsigned char* bgrImage_h) {
hipError_t cuda_ret;
unsigned char *greyImage_d, *bgrImage_d;
unsigned char *greyImage_h = (unsigned char*)malloc(NUM_ROWS * NUM_COLS * sizeof(unsigned char));
cuda_ret = hipMalloc((void**)&greyImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != hipSuccess)
cout << "Unable to copy device memory" << endl;
cuda_ret = hipMalloc((void**)&bgrImage_d, 3 * NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != hipSuccess)
cout << "Unable to copy device memory" << endl;
cuda_ret = hipMemcpy(bgrImage_d, bgrImage_h, 3 * NUM_ROWS * NUM_COLS * sizeof(unsigned char), hipMemcpyHostToDevice);
if (cuda_ret != hipSuccess)
cout << "Unable to copy device memory" << endl;
dim3 block_dim(BLOCK_SIZE, 1, 1);
const unsigned int blocks = NUM_COLS * NUM_ROWS / BLOCK_SIZE + ((NUM_COLS * NUM_ROWS) % BLOCK_SIZE ? 1 : 0);
dim3 grid_dim(blocks, 1, 1);
	hipLaunchKernelGGL(bgrToGreyscale, dim3(grid_dim), dim3(block_dim), 0, 0, bgrImage_d, greyImage_d);
cuda_ret = hipMemcpy(greyImage_h, greyImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char), hipMemcpyDeviceToHost);
if (cuda_ret != hipSuccess)
cout << "Unable to copy host memory" << endl;
hipFree(bgrImage_d);
hipFree(greyImage_d);
return greyImage_h;
}
unsigned char* makeCudaAlignedGreyAndSubtractAndBlurAndThresh(unsigned char* bImg_h, unsigned char* gImg_h, unsigned char* rImg_h) {
hipError_t cuda_ret;
unsigned char *diffImage_d, *bImage_d, *gImage_d, *rImage_d;
unsigned char *threshedImage_h = (unsigned char*)malloc(NUM_ROWS * NUM_COLS * sizeof(unsigned char));
unsigned char *threshedImage_d;
cuda_ret = hipMalloc((void**)&threshedImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != hipSuccess)
cout << "Unable to allocate device memory blurImage_d" << endl;
cuda_ret = hipMalloc((void**)&diffImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != hipSuccess)
cout << "Unable to allocate device memory diffImage_d" << endl;
cuda_ret = hipMalloc((void**)&bImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != hipSuccess)
cout << "Unable to allocate device memory bgrImage_d" << endl;
cuda_ret = hipMalloc((void**)&gImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != hipSuccess)
cout << "Unable to allocate device memory bgrImage_d" << endl;
cuda_ret = hipMalloc((void**)&rImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != hipSuccess)
cout << "Unable to allocate device memory bgrImage_d" << endl;
cuda_ret = hipMemcpy(bImage_d, bImg_h, NUM_ROWS * NUM_COLS * sizeof(unsigned char), hipMemcpyHostToDevice);
if (cuda_ret != hipSuccess)
cout << "Unable to copy device memory bgrImage_d" << endl;
cuda_ret = hipMemcpy(gImage_d, gImg_h, NUM_ROWS * NUM_COLS * sizeof(unsigned char), hipMemcpyHostToDevice);
if (cuda_ret != hipSuccess)
cout << "Unable to copy device memory bgrImage_d" << endl;
cuda_ret = hipMemcpy(rImage_d, rImg_h, NUM_ROWS * NUM_COLS * sizeof(unsigned char), hipMemcpyHostToDevice);
if (cuda_ret != hipSuccess)
cout << "Unable to copy device memory bgrImage_d" << endl;
dim3 block_dim(BLOCK_SIZE, 1, 1);
const unsigned int blocks = NUM_COLS * NUM_ROWS / BLOCK_SIZE + ((NUM_COLS * NUM_ROWS) % BLOCK_SIZE ? 1 : 0);
dim3 grid_dim(blocks, 1, 1);
	hipLaunchKernelGGL(bgrAlignedToGreyscaleAndSubtract, dim3(grid_dim), dim3(block_dim), 0, 0, bImage_d, gImage_d, rImage_d, diffImage_d, greyPreviousImage_d);
block_dim.x = BLOCK_SIZE_2D;
block_dim.y = BLOCK_SIZE_2D;
block_dim.z = 1;
grid_dim.x = NUM_COLS / TILE_SIZE;
if (NUM_COLS%TILE_SIZE != 0) grid_dim.x++;
grid_dim.y = NUM_ROWS / TILE_SIZE;
if (NUM_ROWS%TILE_SIZE != 0) grid_dim.y++;
grid_dim.z = 1;
	hipLaunchKernelGGL(cudaBlurAndThresh, dim3(grid_dim), dim3(block_dim), 0, 0, diffImage_d, threshedImage_d);
cuda_ret = hipMemcpy(threshedImage_h, threshedImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char), hipMemcpyDeviceToHost);
if (cuda_ret != hipSuccess)
cout << "Unable to copy host memory blurImage_h" << endl;
hipFree(bImage_d);
hipFree(gImage_d);
hipFree(rImage_d);
hipFree(diffImage_d);
hipFree(threshedImage_d);
return threshedImage_h;
}
unsigned char* makeCudaGreyAndSubtractAndBlurAndThresh(unsigned char* bgrImg_h) {
hipError_t cuda_ret;
unsigned char *diffImage_d, *bgrImage_d;
unsigned char *threshedImage_h = (unsigned char*)malloc(NUM_ROWS * NUM_COLS * sizeof(unsigned char));
unsigned char *threshedImage_d;
cuda_ret = hipMalloc((void**)&threshedImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != hipSuccess)
cout << "Unable to allocate device memory blurImage_d" << endl;
cuda_ret = hipMalloc((void**)&diffImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != hipSuccess)
cout << "Unable to allocate device memory diffImage_d" << endl;
cuda_ret = hipMalloc((void**)&bgrImage_d, 3 * NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != hipSuccess)
cout << "Unable to allocate device memory bgrImage_d" << endl;
cuda_ret = hipMemcpy(bgrImage_d, bgrImg_h, 3 * NUM_ROWS * NUM_COLS * sizeof(unsigned char), hipMemcpyHostToDevice);
if (cuda_ret != hipSuccess)
cout << "Unable to copy device memory bgrImage_d" << endl;
dim3 block_dim(BLOCK_SIZE, 1, 1);
const unsigned int blocks = NUM_COLS * NUM_ROWS / BLOCK_SIZE + ((NUM_COLS * NUM_ROWS) % BLOCK_SIZE ? 1 : 0);
dim3 grid_dim(blocks, 1, 1);
	hipLaunchKernelGGL(bgrToGreyscaleAndSubtract, dim3(grid_dim), dim3(block_dim), 0, 0, bgrImage_d, diffImage_d, greyPreviousImage_d);
block_dim.x = BLOCK_SIZE_2D;
block_dim.y = BLOCK_SIZE_2D;
block_dim.z = 1;
grid_dim.x = NUM_COLS / TILE_SIZE;
if (NUM_COLS%TILE_SIZE != 0) grid_dim.x++;
grid_dim.y = NUM_ROWS / TILE_SIZE;
if (NUM_ROWS%TILE_SIZE != 0) grid_dim.y++;
grid_dim.z = 1;
	hipLaunchKernelGGL(cudaBlurAndThresh, dim3(grid_dim), dim3(block_dim), 0, 0, diffImage_d, threshedImage_d);
cuda_ret = hipMemcpy(threshedImage_h, threshedImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char), hipMemcpyDeviceToHost);
if (cuda_ret != hipSuccess)
cout << "Unable to copy host memory blurImage_h" << endl;
hipFree(bgrImage_d);
hipFree(diffImage_d);
hipFree(threshedImage_d);
return threshedImage_h;
}
unsigned char* makeCudaSubtractAndBlurAndThresh(unsigned char* newImageGrey_h, unsigned char* previousImageGrey_h) {
hipError_t cuda_ret;
unsigned char *threshedImage_h = (unsigned char*)malloc(NUM_ROWS * NUM_COLS * sizeof(unsigned char));
unsigned char *diffImage_d, *threshedImage_d, *previousImageGrey_d, *newImageGrey_d;
cuda_ret = hipMalloc((void**)&diffImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != hipSuccess)
cout << "Unable to allocate device memory diffImage_d" << endl;
cuda_ret = hipMalloc((void**)&threshedImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != hipSuccess)
cout << "Unable to allocate device memory threshedImage_d" << endl;
cuda_ret = hipMalloc((void**)&previousImageGrey_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != hipSuccess)
cout << "Unable to allocate device memory previousImageGrey_d" << endl;
cuda_ret = hipMalloc((void**)&newImageGrey_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != hipSuccess)
cout << "Unable to allocate device memory newImageGrey_d" << endl;
cuda_ret = hipMemcpy(newImageGrey_d, newImageGrey_h, NUM_ROWS * NUM_COLS * sizeof(unsigned char), hipMemcpyHostToDevice);
if (cuda_ret != hipSuccess)
cout << "Unable to copy device memory newImageGrey_d" << endl;
cuda_ret = hipMemcpy(previousImageGrey_d, previousImageGrey_h, NUM_ROWS * NUM_COLS * sizeof(unsigned char), hipMemcpyHostToDevice);
if (cuda_ret != hipSuccess)
cout << "Unable to copy device memory previousImageGrey_d" << endl;
dim3 block_dim(BLOCK_SIZE, 1, 1);
const unsigned int blocks = NUM_COLS * NUM_ROWS / BLOCK_SIZE + ((NUM_COLS * NUM_ROWS) % BLOCK_SIZE ? 1 : 0);
dim3 grid_dim(blocks, 1, 1);
hipLaunchKernelGGL(( cudaSubtract) , dim3(grid_dim), dim3(block_dim) , 0, 0, newImageGrey_d, previousImageGrey_d, diffImage_d);
block_dim.x = BLOCK_SIZE_2D;
block_dim.y = BLOCK_SIZE_2D;
block_dim.z = 1;
grid_dim.x = NUM_COLS / TILE_SIZE;
if (NUM_COLS%TILE_SIZE != 0) grid_dim.x++;
grid_dim.y = NUM_ROWS / TILE_SIZE;
if (NUM_ROWS%TILE_SIZE != 0) grid_dim.y++;
grid_dim.z = 1;
	hipLaunchKernelGGL(cudaBlurAndThresh, dim3(grid_dim), dim3(block_dim), 0, 0, diffImage_d, threshedImage_d);
cuda_ret = hipMemcpy(threshedImage_h, threshedImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char), hipMemcpyDeviceToHost);
if (cuda_ret != hipSuccess)
cout << "Unable to copy host memory threshedImage_h" << cuda_ret << endl;
hipFree(diffImage_d);
hipFree(threshedImage_d);
hipFree(previousImageGrey_d);
hipFree(newImageGrey_d);
return threshedImage_h;
}
unsigned char* makeCPUGrey(unsigned char* bgrImage) {
unsigned char *result = (unsigned char*)malloc(NUM_ROWS * NUM_COLS * sizeof(unsigned char));
for (int pointIndex = 0; pointIndex < NUM_ROWS * NUM_COLS; pointIndex++) {
long bgrIndex = pointIndex * 3;
unsigned char greyPoint = .299f*bgrImage[bgrIndex + 2] + .587f*bgrImage[bgrIndex + 1] + .114f*bgrImage[bgrIndex];
result[pointIndex] = greyPoint;
}
return result;
}
unsigned char* makeCPUSubtractAndBlurAndThresh(unsigned char* newGrey, unsigned char* previousGrey) {
unsigned char *result = (unsigned char*)malloc(NUM_ROWS * NUM_COLS * sizeof(unsigned char));
unsigned char *diff = (unsigned char*)malloc(NUM_ROWS * NUM_COLS * sizeof(unsigned char));
for (int pointIndex = 0; pointIndex < NUM_ROWS * NUM_COLS; pointIndex++) {
diff[pointIndex] = newGrey[pointIndex] > previousGrey[pointIndex] ? newGrey[pointIndex] - previousGrey[pointIndex] : 0;
}
for (int row = 0; row < NUM_ROWS ; row++) {
for (int col = 0; col < NUM_COLS; col++) {
float sum = 0;
for (int row_d = row - 2; row_d <= row + 2; row_d++) {
for (int col_d = col - 2; col_d <= col + 2; col_d++) {
if (row_d > 0 && row_d < NUM_ROWS && col_d > 0 && col_d < NUM_COLS) {
sum += diff[row_d * NUM_COLS + col_d] / float(25);
}
}
}
unsigned char pixel = round(sum);
pixel = pixel > 30 ? 255 : 0;
result[row * NUM_COLS + col] = pixel;
}
}
free(diff);
return result;
}
void runOpenCV() {
Timer timer;
float runningTime = 0;
int contoursFound = 0;
cv::String path = cv::String("./frames/frames-%07d.png");
cv::VideoCapture cap(path);
cv::Mat previousImage;
cv::Mat previousGrey;
cap.read(previousImage);
startTime(&timer);
cv::cvtColor(previousImage, previousGrey, COLOR_BGR2GRAY);
stopTime(&timer);
runningTime += elapsedTime(timer);
while (cap.isOpened())
{
cv::Mat newImage;
cv::Mat newGrey;
cv::Mat diff;
if (!cap.read(newImage))
break;
int k = cap.get(CV_CAP_PROP_POS_FRAMES);
startTime(&timer);
cv::cvtColor(newImage, newGrey, COLOR_BGR2GRAY);
cv::subtract(newGrey, previousGrey, diff);
stopTime(&timer);
runningTime += elapsedTime(timer);
cv::Mat blurred;
startTime(&timer);
cv::blur(diff, blurred, Size(5,5));
stopTime(&timer);
runningTime += elapsedTime(timer);
cv::Mat thresholded;
startTime(&timer);
cv::threshold(blurred, thresholded, 30, 255, THRESH_BINARY);
stopTime(&timer);
runningTime += elapsedTime(timer);
vector<vector<Point> > contours;
cv::findContours(thresholded, contours, CV_RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); i++) {
Rect brect = cv::boundingRect(contours[i]);
if (brect.area() >= 200) {
contoursFound++;
// cv::Mat cropped = newImage(brect);
// std::ostringstream ostr;
// ostr << "diff_" << k << "_" << k-1 << "_" << i << ".jpg";
// std::string theNumberString = ostr.str();
// cv::imwrite(theNumberString, cropped);
}
}
previousGrey = newGrey;
}
cout << "=== Open CV CPU version ===" << endl;
cout << "runningTime : " << runningTime << endl;
	cout << "Contours found: " << contoursFound << endl;
}
void runCudaSimple() {
Timer timer;
float runningTime = 0;
int contoursFound = 0;
cv::String path = cv::String("./frames/frames-%07d.png");
cv::VideoCapture cap(path);
cv::Mat previousImage;
cap.read(previousImage);
cv::Mat greyPrevious;
startTime(&timer);
unsigned char* previousImageGrey = makeCudaGrey(previousImage.data);
stopTime(&timer);
runningTime += elapsedTime(timer);
while (cap.isOpened())
{
cv::Mat newImage;
if (!cap.read(newImage))
break;
int k = cap.get(CV_CAP_PROP_POS_FRAMES);
startTime(&timer);
unsigned char* newImageGrey = makeCudaGrey(newImage.data);
unsigned char* thresholdedData = makeCudaSubtractAndBlurAndThresh(newImageGrey, previousImageGrey);
stopTime(&timer);
runningTime += elapsedTime(timer);
cv::Mat thresholded = cv::Mat(NUM_ROWS, NUM_COLS, IMREAD_GRAYSCALE, thresholdedData);
vector<vector<Point> > contours;
cv::findContours(thresholded, contours, CV_RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); i++) {
Rect brect = cv::boundingRect(contours[i]);
if (brect.area() >= 200) {
contoursFound++;
//cv::Mat cropped = newImage(brect);
//std::ostringstream ostr;
//ostr << "diff_" << k << "_" << k-1 << "_" << i << ".jpg";
//std::string theNumberString = ostr.str();
//cv::imwrite(theNumberString, cropped);
}
}
free(previousImageGrey);
previousImageGrey = newImageGrey;
free(thresholdedData);
}
free(previousImageGrey);
cout << "=== CUDA optimized convolution version ===" << endl;
cout << "Running time: " << runningTime << endl;
	cout << "Contours found: " << contoursFound << endl;
}
void runCudaGreyStaysOnDevice() {
Timer timer;
float runningTime = 0;
int contoursFound = 0;
cv::String path = cv::String("./frames/frames-%07d.png");
cv::VideoCapture cap(path);
cv::Mat previousImage;
cap.read(previousImage);
cv::Mat greyPrevious;
startTime(&timer);
greyPreviousImage_d = makeCudaGreyOnDevice(previousImage.data);
stopTime(&timer);
runningTime += elapsedTime(timer);
while (cap.isOpened())
{
cv::Mat newImage;
unsigned char* thresholdedData;
if (!cap.read(newImage))
break;
int k = cap.get(CV_CAP_PROP_POS_FRAMES);
startTime(&timer);
thresholdedData = makeCudaGreyAndSubtractAndBlurAndThresh(newImage.data);
stopTime(&timer);
runningTime += elapsedTime(timer);
cv::Mat thresholded = cv::Mat(NUM_ROWS, NUM_COLS, IMREAD_GRAYSCALE, thresholdedData);
vector<vector<Point> > contours;
cv::findContours(thresholded, contours, CV_RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); i++) {
Rect brect = cv::boundingRect(contours[i]);
if (brect.area() >= 200) {
contoursFound++;
//cv::Mat cropped = newImage(brect);
//std::ostringstream ostr;
//ostr << "diff_" << k << "_" << k-1 << "_" << i << ".jpg";
//std::string theNumberString = ostr.str();
//cv::imwrite(theNumberString, cropped);
}
}
free(thresholdedData);
}
hipFree(greyPreviousImage_d);
cout << "=== CUDA optimized convolution + grey stays on device version ===" << endl;
cout << "Running time: " << runningTime << endl;
	cout << "Contours found: " << contoursFound << endl;
}
void runCudaGreyStaysOnDeviceAlignedBGR() {
Timer timer;
float runningTime = 0;
int contoursFound = 0;
cv::String path = cv::String("./frames/frames-%07d.png");
cv::VideoCapture cap(path);
cv::Mat previousImage;
cap.read(previousImage);
cv::Mat greyPrevious;
startTime(&timer);
greyPreviousImage_d = makeCudaGreyOnDevice(previousImage.data);
stopTime(&timer);
runningTime += elapsedTime(timer);
cv::Mat bgr[3];
while (cap.isOpened())
{
cv::Mat newImage;
unsigned char* thresholdedData;
if (!cap.read(newImage))
break;
int k = cap.get(CV_CAP_PROP_POS_FRAMES);
startTime(&timer);
split(newImage, bgr);
thresholdedData = makeCudaAlignedGreyAndSubtractAndBlurAndThresh(bgr[0].data, bgr[1].data, bgr[2].data);
stopTime(&timer);
runningTime += elapsedTime(timer);
cv::Mat thresholded = cv::Mat(NUM_ROWS, NUM_COLS, IMREAD_GRAYSCALE, thresholdedData);
vector<vector<Point> > contours;
cv::findContours(thresholded, contours, CV_RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); i++) {
Rect brect = cv::boundingRect(contours[i]);
if (brect.area() >= 200) {
contoursFound++;
//cv::Mat cropped = newImage(brect);
//std::ostringstream ostr;
//ostr << "diff_" << k << "_" << k-1 << "_" << i << ".jpg";
//std::string theNumberString = ostr.str();
//cv::imwrite(theNumberString, cropped);
}
}
free(thresholdedData);
}
hipFree(greyPreviousImage_d);
cout << "=== CUDA optimized convolution + aligned BGR + grey stays on device version ===" << endl;
cout << "Running time: " << runningTime << endl;
	cout << "Contours found: " << contoursFound << endl;
}
void runCPU() {
Timer timer;
float runningTimeSec = 0;
int contoursFound = 0;
cv::String path = cv::String("./frames/frames-%07d.png");
cv::VideoCapture cap(path);
cv::Mat previousImage;
cap.read(previousImage);
startTime(&timer);
unsigned char* previousGrey = makeCPUGrey(previousImage.data);
stopTime(&timer);
runningTimeSec += elapsedTime(timer);
while (cap.isOpened())
{
cv::Mat newImage;
if (!cap.read(newImage))
break;
int k = cap.get(CV_CAP_PROP_POS_FRAMES);
startTime(&timer);
unsigned char* newGrey = makeCPUGrey(newImage.data);
unsigned char *thresholdedData = makeCPUSubtractAndBlurAndThresh(newGrey, previousGrey);
stopTime(&timer);
runningTimeSec += elapsedTime(timer);
cv::Mat thresholded = cv::Mat(NUM_ROWS, NUM_COLS, IMREAD_GRAYSCALE, thresholdedData);
vector<vector<Point> > contours;
cv::findContours(thresholded, contours, CV_RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); i++) {
Rect brect = cv::boundingRect(contours[i]);
if (brect.area() >= 200) {
contoursFound++;
//cv::Mat cropped = newImage(brect);
//std::ostringstream ostr;
//ostr << "diff_" << k << "_" << k-1 << "_" << i << ".jpg";
//std::string theNumberString = ostr.str();
//cv::imwrite(theNumberString, cropped);
}
}
free(thresholdedData);
free(previousGrey);
previousGrey = newGrey;
}
free(previousGrey);
cout << "=== CPU version ===" << endl;
cout << "Running time sec: " << runningTimeSec << endl;
	cout << "Contours found: " << contoursFound << endl;
}
int main()
{
runCPU();
runCudaSimple();
runCudaGreyStaysOnDevice();
runCudaGreyStaysOnDeviceAlignedBGR();
runOpenCV();
return 0;
}
|
7774095cdd3b3f3c48519ef1f6d4d4f02bc3e38f.cu
|
#define _GLIBCXX_USE_CXX11_ABI 0
#define NUM_ROWS 1934
#define NUM_COLS 3440
#define BLOCK_SIZE 256
#define FILTER_SIZE 5
#define TILE_SIZE 12
#define BLOCK_SIZE_2D (TILE_SIZE + FILTER_SIZE - 1)
#include <opencv2/opencv.hpp>
#include <sys/time.h>
using namespace std;
using namespace cv;
unsigned char *greyPreviousImage_d;
typedef struct {
struct timeval startTime;
struct timeval endTime;
} Timer;
void startTime(Timer* timer) {
gettimeofday(&(timer->startTime), NULL);
}
void stopTime(Timer* timer) {
gettimeofday(&(timer->endTime), NULL);
}
float elapsedTime(Timer timer) {
return ((float)((timer.endTime.tv_sec - timer.startTime.tv_sec) \
+ (timer.endTime.tv_usec - timer.startTime.tv_usec) / 1.0e6));
}
//
__global__ void bgrToGreyscale(const unsigned char* const bgrImage, unsigned char* greyImage){
const long pointIndex = threadIdx.x + blockDim.x*blockIdx.x;
if (pointIndex < NUM_ROWS * NUM_COLS) {
long bgrIndex = pointIndex * 3;
unsigned char greyPoint = .299f*bgrImage[bgrIndex + 2] + .587f*bgrImage[bgrIndex + 1] + .114f*bgrImage[bgrIndex];
greyImage[pointIndex] = greyPoint;
}
}
//
__global__ void bgrToGreyscaleAndSubtract(const unsigned char* const bgrImage, unsigned char* diffImage, unsigned char* previousGreyImage) {
const long pointIndex = threadIdx.x + blockDim.x*blockIdx.x;
if (pointIndex < NUM_ROWS * NUM_COLS) {
long bgrIndex = pointIndex * 3;
unsigned char greyPoint = .299f*bgrImage[bgrIndex + 2] + .587f*bgrImage[bgrIndex + 1] + .114f*bgrImage[bgrIndex];
unsigned char previousGreyPoint = previousGreyImage[pointIndex];
diffImage[pointIndex] = greyPoint < previousGreyPoint ? 0 : greyPoint - previousGreyPoint;
previousGreyImage[pointIndex] = greyPoint;
}
}
__global__ void bgrAlignedToGreyscaleAndSubtract(const unsigned char* const bImage, const unsigned char* const gImage, const unsigned char* const rImage,
unsigned char* diffImage, unsigned char* previousGreyImage) {
const long pointIndex = threadIdx.x + blockDim.x*blockIdx.x;
if (pointIndex < NUM_ROWS * NUM_COLS) {
unsigned char greyPoint = .299f*rImage[pointIndex] + .587f*gImage[pointIndex] + .114f*bImage[pointIndex];
unsigned char previousGreyPoint = previousGreyImage[pointIndex];
diffImage[pointIndex] = greyPoint < previousGreyPoint ? 0 : greyPoint - previousGreyPoint;
previousGreyImage[pointIndex] = greyPoint;
}
}
__global__ void cudaSubtract(const unsigned char* const newGreyImage, unsigned char* previousGreyImage, unsigned char* diffImage) {
const long pointIndex = threadIdx.x + blockDim.x*blockIdx.x;
if (pointIndex < NUM_ROWS * NUM_COLS) {
unsigned char greyPoint = newGreyImage[pointIndex];
unsigned char previousGreyPoint = previousGreyImage[pointIndex];
diffImage[pointIndex] = greyPoint < previousGreyPoint ? 0 : greyPoint - previousGreyPoint;
}
}
__global__ void cudaBlurAndThresh(const unsigned char* const in, unsigned char* out)
{
__shared__ float Ns[BLOCK_SIZE_2D][BLOCK_SIZE_2D];
int row_o = blockIdx.y * TILE_SIZE + threadIdx.y;
int col_o = blockIdx.x * TILE_SIZE + threadIdx.x;
int row_i = row_o - FILTER_SIZE / 2;
int col_i = col_o - FILTER_SIZE / 2;
if (row_i >= 0 && col_i >= 0 && row_i < NUM_ROWS && col_i < NUM_COLS)
Ns[threadIdx.y][threadIdx.x] = in[row_i * NUM_COLS + col_i];
else
Ns[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
if (threadIdx.x < TILE_SIZE && threadIdx.y < TILE_SIZE &&
row_o < NUM_ROWS && col_o < NUM_COLS) {
float sum = 0;
for (int i = 0; i < FILTER_SIZE; i++) {
for (int j = 0; j < FILTER_SIZE; j++) {
sum += Ns[threadIdx.y + i][threadIdx.x + j] / 25.0;
}
}
int rounded = round(sum);
out[row_o*NUM_COLS + col_o] = rounded > 30 ? 255 : 0;
}
}
unsigned char* makeCudaGreyOnDevice(unsigned char* bgrImage_h) {
cudaError_t cuda_ret;
unsigned char *greyImage_d, *bgrImage_d;
cuda_ret = cudaMalloc((void**)&greyImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != cudaSuccess)
cout << "Unable to copy device memory" << endl;
cuda_ret = cudaMalloc((void**)&bgrImage_d, 3 * NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != cudaSuccess)
cout << "Unable to copy device memory" << endl;
cuda_ret = cudaMemcpy(bgrImage_d, bgrImage_h, 3 * NUM_ROWS * NUM_COLS * sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cuda_ret != cudaSuccess)
cout << "Unable to copy device memory" << endl;
dim3 block_dim(BLOCK_SIZE, 1, 1);
const unsigned int blocks = NUM_COLS * NUM_ROWS / BLOCK_SIZE + ((NUM_COLS * NUM_ROWS) % BLOCK_SIZE ? 1 : 0);
dim3 grid_dim(blocks, 1, 1);
bgrToGreyscale <<<grid_dim, block_dim>>> (bgrImage_d, greyImage_d);
cudaFree(bgrImage_d);
return greyImage_d;
}
unsigned char* makeCudaGrey(unsigned char* bgrImage_h) {
cudaError_t cuda_ret;
unsigned char *greyImage_d, *bgrImage_d;
unsigned char *greyImage_h = (unsigned char*)malloc(NUM_ROWS * NUM_COLS * sizeof(unsigned char));
cuda_ret = cudaMalloc((void**)&greyImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != cudaSuccess)
cout << "Unable to copy device memory" << endl;
cuda_ret = cudaMalloc((void**)&bgrImage_d, 3 * NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != cudaSuccess)
cout << "Unable to copy device memory" << endl;
cuda_ret = cudaMemcpy(bgrImage_d, bgrImage_h, 3 * NUM_ROWS * NUM_COLS * sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cuda_ret != cudaSuccess)
cout << "Unable to copy device memory" << endl;
dim3 block_dim(BLOCK_SIZE, 1, 1);
const unsigned int blocks = NUM_COLS * NUM_ROWS / BLOCK_SIZE + ((NUM_COLS * NUM_ROWS) % BLOCK_SIZE ? 1 : 0);
dim3 grid_dim(blocks, 1, 1);
bgrToGreyscale << <grid_dim, block_dim >> > (bgrImage_d, greyImage_d);
cuda_ret = cudaMemcpy(greyImage_h, greyImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char), cudaMemcpyDeviceToHost);
if (cuda_ret != cudaSuccess)
cout << "Unable to copy host memory" << endl;
cudaFree(bgrImage_d);
cudaFree(greyImage_d);
return greyImage_h;
}
unsigned char* makeCudaAlignedGreyAndSubtractAndBlurAndThresh(unsigned char* bImg_h, unsigned char* gImg_h, unsigned char* rImg_h) {
cudaError_t cuda_ret;
unsigned char *diffImage_d, *bImage_d, *gImage_d, *rImage_d;
unsigned char *threshedImage_h = (unsigned char*)malloc(NUM_ROWS * NUM_COLS * sizeof(unsigned char));
unsigned char *threshedImage_d;
cuda_ret = cudaMalloc((void**)&threshedImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != cudaSuccess)
cout << "Unable to allocate device memory blurImage_d" << endl;
cuda_ret = cudaMalloc((void**)&diffImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != cudaSuccess)
cout << "Unable to allocate device memory diffImage_d" << endl;
cuda_ret = cudaMalloc((void**)&bImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != cudaSuccess)
cout << "Unable to allocate device memory bgrImage_d" << endl;
cuda_ret = cudaMalloc((void**)&gImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != cudaSuccess)
cout << "Unable to allocate device memory bgrImage_d" << endl;
cuda_ret = cudaMalloc((void**)&rImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != cudaSuccess)
cout << "Unable to allocate device memory bgrImage_d" << endl;
cuda_ret = cudaMemcpy(bImage_d, bImg_h, NUM_ROWS * NUM_COLS * sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cuda_ret != cudaSuccess)
cout << "Unable to copy device memory bgrImage_d" << endl;
cuda_ret = cudaMemcpy(gImage_d, gImg_h, NUM_ROWS * NUM_COLS * sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cuda_ret != cudaSuccess)
cout << "Unable to copy device memory bgrImage_d" << endl;
cuda_ret = cudaMemcpy(rImage_d, rImg_h, NUM_ROWS * NUM_COLS * sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cuda_ret != cudaSuccess)
cout << "Unable to copy device memory bgrImage_d" << endl;
dim3 block_dim(BLOCK_SIZE, 1, 1);
const unsigned int blocks = NUM_COLS * NUM_ROWS / BLOCK_SIZE + ((NUM_COLS * NUM_ROWS) % BLOCK_SIZE ? 1 : 0);
dim3 grid_dim(blocks, 1, 1);
bgrAlignedToGreyscaleAndSubtract << <grid_dim, block_dim >> >(bImage_d, gImage_d, rImage_d, diffImage_d, greyPreviousImage_d);
block_dim.x = BLOCK_SIZE_2D;
block_dim.y = BLOCK_SIZE_2D;
block_dim.z = 1;
grid_dim.x = NUM_COLS / TILE_SIZE;
if (NUM_COLS%TILE_SIZE != 0) grid_dim.x++;
grid_dim.y = NUM_ROWS / TILE_SIZE;
if (NUM_ROWS%TILE_SIZE != 0) grid_dim.y++;
grid_dim.z = 1;
cudaBlurAndThresh << <grid_dim, block_dim >> > (diffImage_d, threshedImage_d);
cuda_ret = cudaMemcpy(threshedImage_h, threshedImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char), cudaMemcpyDeviceToHost);
if (cuda_ret != cudaSuccess)
cout << "Unable to copy host memory blurImage_h" << endl;
cudaFree(bImage_d);
cudaFree(gImage_d);
cudaFree(rImage_d);
cudaFree(diffImage_d);
cudaFree(threshedImage_d);
return threshedImage_h;
}
unsigned char* makeCudaGreyAndSubtractAndBlurAndThresh(unsigned char* bgrImg_h) {
cudaError_t cuda_ret;
unsigned char *diffImage_d, *bgrImage_d;
unsigned char *threshedImage_h = (unsigned char*)malloc(NUM_ROWS * NUM_COLS * sizeof(unsigned char));
unsigned char *threshedImage_d;
cuda_ret = cudaMalloc((void**)&threshedImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != cudaSuccess)
cout << "Unable to allocate device memory blurImage_d" << endl;
cuda_ret = cudaMalloc((void**)&diffImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != cudaSuccess)
cout << "Unable to allocate device memory diffImage_d" << endl;
cuda_ret = cudaMalloc((void**)&bgrImage_d, 3 * NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != cudaSuccess)
cout << "Unable to allocate device memory bgrImage_d" << endl;
cuda_ret = cudaMemcpy(bgrImage_d, bgrImg_h, 3 * NUM_ROWS * NUM_COLS * sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cuda_ret != cudaSuccess)
cout << "Unable to copy device memory bgrImage_d" << endl;
dim3 block_dim(BLOCK_SIZE, 1, 1);
const unsigned int blocks = NUM_COLS * NUM_ROWS / BLOCK_SIZE + ((NUM_COLS * NUM_ROWS) % BLOCK_SIZE ? 1 : 0);
dim3 grid_dim(blocks, 1, 1);
bgrToGreyscaleAndSubtract << <grid_dim, block_dim >> >(bgrImage_d, diffImage_d, greyPreviousImage_d);
block_dim.x = BLOCK_SIZE_2D;
block_dim.y = BLOCK_SIZE_2D;
block_dim.z = 1;
grid_dim.x = NUM_COLS / TILE_SIZE;
if (NUM_COLS%TILE_SIZE != 0) grid_dim.x++;
grid_dim.y = NUM_ROWS / TILE_SIZE;
if (NUM_ROWS%TILE_SIZE != 0) grid_dim.y++;
grid_dim.z = 1;
cudaBlurAndThresh << <grid_dim, block_dim >> > (diffImage_d, threshedImage_d);
cuda_ret = cudaMemcpy(threshedImage_h, threshedImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char), cudaMemcpyDeviceToHost);
if (cuda_ret != cudaSuccess)
cout << "Unable to copy host memory blurImage_h" << endl;
cudaFree(bgrImage_d);
cudaFree(diffImage_d);
cudaFree(threshedImage_d);
return threshedImage_h;
}
unsigned char* makeCudaSubtractAndBlurAndThresh(unsigned char* newImageGrey_h, unsigned char* previousImageGrey_h) {
cudaError_t cuda_ret;
unsigned char *threshedImage_h = (unsigned char*)malloc(NUM_ROWS * NUM_COLS * sizeof(unsigned char));
unsigned char *diffImage_d, *threshedImage_d, *previousImageGrey_d, *newImageGrey_d;
cuda_ret = cudaMalloc((void**)&diffImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != cudaSuccess)
cout << "Unable to allocate device memory diffImage_d" << endl;
cuda_ret = cudaMalloc((void**)&threshedImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != cudaSuccess)
cout << "Unable to allocate device memory threshedImage_d" << endl;
cuda_ret = cudaMalloc((void**)&previousImageGrey_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != cudaSuccess)
cout << "Unable to allocate device memory previousImageGrey_d" << endl;
cuda_ret = cudaMalloc((void**)&newImageGrey_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char));
if (cuda_ret != cudaSuccess)
cout << "Unable to allocate device memory newImageGrey_d" << endl;
cuda_ret = cudaMemcpy(newImageGrey_d, newImageGrey_h, NUM_ROWS * NUM_COLS * sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cuda_ret != cudaSuccess)
cout << "Unable to copy device memory newImageGrey_d" << endl;
cuda_ret = cudaMemcpy(previousImageGrey_d, previousImageGrey_h, NUM_ROWS * NUM_COLS * sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cuda_ret != cudaSuccess)
cout << "Unable to copy device memory previousImageGrey_d" << endl;
dim3 block_dim(BLOCK_SIZE, 1, 1);
const unsigned int blocks = NUM_COLS * NUM_ROWS / BLOCK_SIZE + ((NUM_COLS * NUM_ROWS) % BLOCK_SIZE ? 1 : 0);
dim3 grid_dim(blocks, 1, 1);
cudaSubtract <<<grid_dim, block_dim >>>(newImageGrey_d, previousImageGrey_d, diffImage_d);
block_dim.x = BLOCK_SIZE_2D;
block_dim.y = BLOCK_SIZE_2D;
block_dim.z = 1;
grid_dim.x = NUM_COLS / TILE_SIZE;
if (NUM_COLS%TILE_SIZE != 0) grid_dim.x++;
grid_dim.y = NUM_ROWS / TILE_SIZE;
if (NUM_ROWS%TILE_SIZE != 0) grid_dim.y++;
grid_dim.z = 1;
cudaBlurAndThresh << <grid_dim, block_dim >> > (diffImage_d, threshedImage_d);
cuda_ret = cudaMemcpy(threshedImage_h, threshedImage_d, NUM_ROWS * NUM_COLS * sizeof(unsigned char), cudaMemcpyDeviceToHost);
if (cuda_ret != cudaSuccess)
cout << "Unable to copy host memory threshedImage_h" << cuda_ret << endl;
cudaFree(diffImage_d);
cudaFree(threshedImage_d);
cudaFree(previousImageGrey_d);
cudaFree(newImageGrey_d);
return threshedImage_h;
}
unsigned char* makeCPUGrey(unsigned char* bgrImage) {
unsigned char *result = (unsigned char*)malloc(NUM_ROWS * NUM_COLS * sizeof(unsigned char));
for (int pointIndex = 0; pointIndex < NUM_ROWS * NUM_COLS; pointIndex++) {
long bgrIndex = pointIndex * 3;
unsigned char greyPoint = .299f*bgrImage[bgrIndex + 2] + .587f*bgrImage[bgrIndex + 1] + .114f*bgrImage[bgrIndex];
result[pointIndex] = greyPoint;
}
return result;
}
unsigned char* makeCPUSubtractAndBlurAndThresh(unsigned char* newGrey, unsigned char* previousGrey) {
unsigned char *result = (unsigned char*)malloc(NUM_ROWS * NUM_COLS * sizeof(unsigned char));
unsigned char *diff = (unsigned char*)malloc(NUM_ROWS * NUM_COLS * sizeof(unsigned char));
for (int pointIndex = 0; pointIndex < NUM_ROWS * NUM_COLS; pointIndex++) {
diff[pointIndex] = newGrey[pointIndex] > previousGrey[pointIndex] ? newGrey[pointIndex] - previousGrey[pointIndex] : 0;
}
for (int row = 0; row < NUM_ROWS ; row++) {
for (int col = 0; col < NUM_COLS; col++) {
float sum = 0;
for (int row_d = row - 2; row_d <= row + 2; row_d++) {
for (int col_d = col - 2; col_d <= col + 2; col_d++) {
if (row_d > 0 && row_d < NUM_ROWS && col_d > 0 && col_d < NUM_COLS) {
sum += diff[row_d * NUM_COLS + col_d] / float(25);
}
}
}
unsigned char pixel = round(sum);
pixel = pixel > 30 ? 255 : 0;
result[row * NUM_COLS + col] = pixel;
}
}
free(diff);
return result;
}
void runOpenCV() {
Timer timer;
float runningTime = 0;
int contoursFound = 0;
cv::String path = cv::String("./frames/frames-%07d.png");
cv::VideoCapture cap(path);
cv::Mat previousImage;
cv::Mat previousGrey;
cap.read(previousImage);
startTime(&timer);
cv::cvtColor(previousImage, previousGrey, COLOR_BGR2GRAY);
stopTime(&timer);
runningTime += elapsedTime(timer);
while (cap.isOpened())
{
cv::Mat newImage;
cv::Mat newGrey;
cv::Mat diff;
if (!cap.read(newImage))
break;
int k = cap.get(CV_CAP_PROP_POS_FRAMES);
startTime(&timer);
cv::cvtColor(newImage, newGrey, COLOR_BGR2GRAY);
cv::subtract(newGrey, previousGrey, diff);
stopTime(&timer);
runningTime += elapsedTime(timer);
cv::Mat blurred;
startTime(&timer);
cv::blur(diff, blurred, Size(5,5));
stopTime(&timer);
runningTime += elapsedTime(timer);
cv::Mat thresholded;
startTime(&timer);
cv::threshold(blurred, thresholded, 30, 255, THRESH_BINARY);
stopTime(&timer);
runningTime += elapsedTime(timer);
vector<vector<Point> > contours;
cv::findContours(thresholded, contours, CV_RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); i++) {
Rect brect = cv::boundingRect(contours[i]);
if (brect.area() >= 200) {
contoursFound++;
// cv::Mat cropped = newImage(brect);
// std::ostringstream ostr;
// ostr << "diff_" << k << "_" << k-1 << "_" << i << ".jpg";
// std::string theNumberString = ostr.str();
// cv::imwrite(theNumberString, cropped);
}
}
previousGrey = newGrey;
}
cout << "=== Open CV CPU version ===" << endl;
cout << "runningTime : " << runningTime << endl;
	cout << "Contours found: " << contoursFound << endl;
}
void runCudaSimple() {
Timer timer;
float runningTime = 0;
int contoursFound = 0;
cv::String path = cv::String("./frames/frames-%07d.png");
cv::VideoCapture cap(path);
cv::Mat previousImage;
cap.read(previousImage);
cv::Mat greyPrevious;
startTime(&timer);
unsigned char* previousImageGrey = makeCudaGrey(previousImage.data);
stopTime(&timer);
runningTime += elapsedTime(timer);
while (cap.isOpened())
{
cv::Mat newImage;
if (!cap.read(newImage))
break;
int k = cap.get(CV_CAP_PROP_POS_FRAMES);
startTime(&timer);
unsigned char* newImageGrey = makeCudaGrey(newImage.data);
unsigned char* thresholdedData = makeCudaSubtractAndBlurAndThresh(newImageGrey, previousImageGrey);
stopTime(&timer);
runningTime += elapsedTime(timer);
cv::Mat thresholded = cv::Mat(NUM_ROWS, NUM_COLS, IMREAD_GRAYSCALE, thresholdedData);
vector<vector<Point> > contours;
cv::findContours(thresholded, contours, CV_RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); i++) {
Rect brect = cv::boundingRect(contours[i]);
if (brect.area() >= 200) {
contoursFound++;
//cv::Mat cropped = newImage(brect);
//std::ostringstream ostr;
//ostr << "diff_" << k << "_" << k-1 << "_" << i << ".jpg";
//std::string theNumberString = ostr.str();
//cv::imwrite(theNumberString, cropped);
}
}
free(previousImageGrey);
previousImageGrey = newImageGrey;
free(thresholdedData);
}
free(previousImageGrey);
cout << "=== CUDA optimized convolution version ===" << endl;
cout << "Running time: " << runningTime << endl;
	cout << "Contours found: " << contoursFound << endl;
}
void runCudaGreyStaysOnDevice() {
Timer timer;
float runningTime = 0;
int contoursFound = 0;
cv::String path = cv::String("./frames/frames-%07d.png");
cv::VideoCapture cap(path);
cv::Mat previousImage;
cap.read(previousImage);
cv::Mat greyPrevious;
startTime(&timer);
greyPreviousImage_d = makeCudaGreyOnDevice(previousImage.data);
stopTime(&timer);
runningTime += elapsedTime(timer);
while (cap.isOpened())
{
cv::Mat newImage;
unsigned char* thresholdedData;
if (!cap.read(newImage))
break;
int k = cap.get(CV_CAP_PROP_POS_FRAMES);
startTime(&timer);
thresholdedData = makeCudaGreyAndSubtractAndBlurAndThresh(newImage.data);
stopTime(&timer);
runningTime += elapsedTime(timer);
cv::Mat thresholded = cv::Mat(NUM_ROWS, NUM_COLS, IMREAD_GRAYSCALE, thresholdedData);
vector<vector<Point> > contours;
cv::findContours(thresholded, contours, CV_RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); i++) {
Rect brect = cv::boundingRect(contours[i]);
if (brect.area() >= 200) {
contoursFound++;
//cv::Mat cropped = newImage(brect);
//std::ostringstream ostr;
//ostr << "diff_" << k << "_" << k-1 << "_" << i << ".jpg";
//std::string theNumberString = ostr.str();
//cv::imwrite(theNumberString, cropped);
}
}
free(thresholdedData);
}
cudaFree(greyPreviousImage_d);
cout << "=== CUDA optimized convolution + grey stays on device version ===" << endl;
cout << "Running time: " << runningTime << endl;
	cout << "Contours found: " << contoursFound << endl;
}
void runCudaGreyStaysOnDeviceAlignedBGR() {
Timer timer;
float runningTime = 0;
int contoursFound = 0;
cv::String path = cv::String("./frames/frames-%07d.png");
cv::VideoCapture cap(path);
cv::Mat previousImage;
cap.read(previousImage);
cv::Mat greyPrevious;
startTime(&timer);
greyPreviousImage_d = makeCudaGreyOnDevice(previousImage.data);
stopTime(&timer);
runningTime += elapsedTime(timer);
cv::Mat bgr[3];
while (cap.isOpened())
{
cv::Mat newImage;
unsigned char* thresholdedData;
if (!cap.read(newImage))
break;
int k = cap.get(CV_CAP_PROP_POS_FRAMES);
startTime(&timer);
split(newImage, bgr);
thresholdedData = makeCudaAlignedGreyAndSubtractAndBlurAndThresh(bgr[0].data, bgr[1].data, bgr[2].data);
stopTime(&timer);
runningTime += elapsedTime(timer);
cv::Mat thresholded = cv::Mat(NUM_ROWS, NUM_COLS, CV_8UC1, thresholdedData);
vector<vector<Point> > contours;
cv::findContours(thresholded, contours, CV_RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); i++) {
Rect brect = cv::boundingRect(contours[i]);
if (brect.area() >= 200) {
contoursFound++;
//cv::Mat cropped = newImage(brect);
//std::ostringstream ostr;
//ostr << "diff_" << k << "_" << k-1 << "_" << i << ".jpg";
//std::string theNumberString = ostr.str();
//cv::imwrite(theNumberString, cropped);
}
}
free(thresholdedData);
}
cudaFree(greyPreviousImage_d);
cout << "=== CUDA optimized convolution + aligned BGR + grey stays on device version ===" << endl;
cout << "Running time: " << runningTime << endl;
cout << "Contrours found: " << contoursFound << endl;
}
void runCPU() {
Timer timer;
float runningTimeSec = 0;
int contoursFound = 0;
cv::String path = cv::String("./frames/frames-%07d.png");
cv::VideoCapture cap(path);
cv::Mat previousImage;
cap.read(previousImage);
startTime(&timer);
unsigned char* previousGrey = makeCPUGrey(previousImage.data);
stopTime(&timer);
runningTimeSec += elapsedTime(timer);
while (cap.isOpened())
{
cv::Mat newImage;
if (!cap.read(newImage))
break;
int k = cap.get(CV_CAP_PROP_POS_FRAMES);
startTime(&timer);
unsigned char* newGrey = makeCPUGrey(newImage.data);
unsigned char *thresholdedData = makeCPUSubtractAndBlurAndThresh(newGrey, previousGrey);
stopTime(&timer);
runningTimeSec += elapsedTime(timer);
cv::Mat thresholded = cv::Mat(NUM_ROWS, NUM_COLS, CV_8UC1, thresholdedData);
vector<vector<Point> > contours;
cv::findContours(thresholded, contours, CV_RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); i++) {
Rect brect = cv::boundingRect(contours[i]);
if (brect.area() >= 200) {
contoursFound++;
//cv::Mat cropped = newImage(brect);
//std::ostringstream ostr;
//ostr << "diff_" << k << "_" << k-1 << "_" << i << ".jpg";
//std::string theNumberString = ostr.str();
//cv::imwrite(theNumberString, cropped);
}
}
free(thresholdedData);
free(previousGrey);
previousGrey = newGrey;
}
free(previousGrey);
cout << "=== CPU version ===" << endl;
cout << "Running time sec: " << runningTimeSec << endl;
cout << "Contrours found: " << contoursFound << endl;
}
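// Benchmark driver: run the CPU, CUDA, and OpenCV pipeline variants back to back;
// each prints its accumulated processing time and the number of contours (area >= 200) it found.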
int main()
{
runCPU();
runCudaSimple();
runCudaGreyStaysOnDevice();
runCudaGreyStaysOnDeviceAlignedBGR();
runOpenCV();
return 0;
}
|
d0b0e9b1406216cded108e7e0bff39dbf3967003.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "init.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
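// For each requested matrix size and each block shape, time 1000 launches of the init kernel.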
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(float));
float *y = NULL;
hipMalloc(&y, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(init, dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, y);
hipDeviceSynchronize();
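// Warm-up launches (untimed); the 1000-iteration loop below is the timed measurement.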
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(init, dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(init, dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
d0b0e9b1406216cded108e7e0bff39dbf3967003.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "init.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
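// For each requested matrix size and each block shape, time 1000 launches of the init kernel.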
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(float));
float *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
init<<<gridBlock,threadBlock>>>(n,x,y);
cudaDeviceSynchronize();
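// Warm-up launches (untimed); the 1000-iteration loop below is the timed measurement.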
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
init<<<gridBlock,threadBlock>>>(n,x,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
init<<<gridBlock,threadBlock>>>(n,x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
62e40362a870b158d4056b991e775264ca9bf8e5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include "error.h"
#include "benchmark.h"
#ifndef BENCHMARK
#define GRID (1024)
#define BLOCK (256)
#endif
texture<uchar4, 2, hipReadModeElementType> tex;
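// Average the kernel_w x kernel_h block of source texels whose top-left corner is at column j, row i.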
__device__ uchar4 avg_kernel(int i, int j, int w, int h, int kernel_w, int kernel_h){
int sum_r = 0, sum_g = 0, sum_b = 0, sum_a = 0;
for(int y = i; y < i + kernel_h; ++y){
for(int x = j; x < j + kernel_w; ++x){
uchar4 pixel = tex2D(tex, x, y);
sum_r += pixel.x;
sum_g += pixel.y;
sum_b += pixel.z;
sum_a += pixel.w;
}
}
int n_pixels = kernel_w * kernel_h;
uchar4 result;
result.x = (unsigned char)(sum_r / n_pixels);
result.y = (unsigned char)(sum_g / n_pixels);
result.z = (unsigned char)(sum_b / n_pixels);
result.w = (unsigned char)(sum_a / n_pixels);
return result;
}
__global__ void SSAA(uchar4 *res, int w, int h, int new_w, int new_h){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int offset_x = blockDim.x * gridDim.x;
int offset_y = blockDim.y * gridDim.y;
int kernel_w = w / new_w;
int kernel_h = h / new_h;
for(int i = idy; i < new_h; i += offset_y){
for(int j = idx; j < new_w; j += offset_x){
int pix_i = i * kernel_h;
int pix_j = j * kernel_w;
res[i * new_w + j] = avg_kernel(pix_i, pix_j, w, h, kernel_w, kernel_h);
}
}
}
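// Host side: read the input image (width, height, then width*height uchar4 pixels),
// bind it to a 2D texture, downscale it with the SSAA kernel, and write the result in the same binary format.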
int main() {
init_error_handling();
char *input_file_path = (char*)malloc(PATH_MAX * sizeof(char));
char *output_file_path = (char*)malloc(PATH_MAX * sizeof(char));
scanf("%s", input_file_path);
scanf("%s", output_file_path);
int new_width = 0, new_height = 0;
scanf("%d", &new_width);
scanf("%d", &new_height);
FILE *input_file;
if((input_file = fopen(input_file_path, "rb")) == NULL) {
printf("ERROR: can't open input file\n");
exit(0);
}
free(input_file_path);
int width = 0, height = 0;
fread(&width, sizeof(int), 1, input_file);
fread(&height, sizeof(int), 1, input_file);
if(width == 0 || height == 0)
return 0;
int ref_n_pixels = width * height;
int res_n_pixels = new_height * new_width;
int ref_size = sizeof(uchar4) * ref_n_pixels;
int res_size = sizeof(uchar4) * res_n_pixels;
uchar4 *ref = (uchar4*)malloc(ref_size);
fread(ref, sizeof(uchar4), ref_n_pixels, input_file);
fclose(input_file);
uchar4 *dev_res;
hipArray *dev_ref;
hipChannelFormatDesc ch = hipCreateChannelDesc<uchar4>();
CHECK_CUDA_CALL_ERROR(hipMallocArray(&dev_ref, &ch, width, height));
CHECK_CUDA_CALL_ERROR(hipMemcpyToArray(dev_ref, 0, 0, ref, ref_size, hipMemcpyHostToDevice));
tex.addressMode[0] = hipAddressModeClamp;
tex.addressMode[1] = hipAddressModeClamp;
tex.channelDesc = ch;
tex.filterMode = hipFilterModePoint;
tex.normalized = false;
CHECK_CUDA_CALL_ERROR(hipBindTextureToArray(tex, dev_ref, ch));
CHECK_CUDA_CALL_ERROR(hipMalloc(&dev_res, res_size));
int grid_single_dim = (int)sqrt(GRID);
int block_single_dim = (int)sqrt(BLOCK);
dim3 gridDim(grid_single_dim, grid_single_dim);
dim3 blockDim(block_single_dim, block_single_dim);
MEASURE((hipLaunchKernelGGL(SSAA, dim3(gridDim), dim3(blockDim), 0, 0, dev_res, width, height, new_width, new_height)));
CHECK_CUDA_KERNEL_ERROR();
uchar4 *res = (uchar4*)realloc(ref, res_size);
CHECK_CUDA_CALL_ERROR(hipMemcpy(res, dev_res, res_size, hipMemcpyDeviceToHost));
CHECK_CUDA_CALL_ERROR(hipUnbindTexture(tex));
CHECK_CUDA_CALL_ERROR(hipFreeArray(dev_ref));
CHECK_CUDA_CALL_ERROR(hipFree(dev_res));
FILE *output_file;
if((output_file = fopen(output_file_path, "wb")) == NULL) {
printf ("ERROR: can't open output file\n");
exit(0);
}
free(output_file_path);
fwrite(&new_width, sizeof(int), 1, output_file);
fwrite(&new_height, sizeof(int), 1, output_file);
fwrite(res, sizeof(uchar4), res_n_pixels, output_file);
fclose(output_file);
free(res);
}
|
62e40362a870b158d4056b991e775264ca9bf8e5.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include "error.h"
#include "benchmark.h"
#ifndef BENCHMARK
#define GRID (1024)
#define BLOCK (256)
#endif
texture<uchar4, 2, cudaReadModeElementType> tex;
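// Average the kernel_w x kernel_h block of source texels whose top-left corner is at column j, row i.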
__device__ uchar4 avg_kernel(int i, int j, int w, int h, int kernel_w, int kernel_h){
int sum_r = 0, sum_g = 0, sum_b = 0, sum_a = 0;
for(int y = i; y < i + kernel_h; ++y){
for(int x = j; x < j + kernel_w; ++x){
uchar4 pixel = tex2D(tex, x, y);
sum_r += pixel.x;
sum_g += pixel.y;
sum_b += pixel.z;
sum_a += pixel.w;
}
}
int n_pixels = kernel_w * kernel_h;
uchar4 result;
result.x = (unsigned char)(sum_r / n_pixels);
result.y = (unsigned char)(sum_g / n_pixels);
result.z = (unsigned char)(sum_b / n_pixels);
result.w = (unsigned char)(sum_a / n_pixels);
return result;
}
__global__ void SSAA(uchar4 *res, int w, int h, int new_w, int new_h){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int offset_x = blockDim.x * gridDim.x;
int offset_y = blockDim.y * gridDim.y;
int kernel_w = w / new_w;
int kernel_h = h / new_h;
for(int i = idy; i < new_h; i += offset_y){
for(int j = idx; j < new_w; j += offset_x){
int pix_i = i * kernel_h;
int pix_j = j * kernel_w;
res[i * new_w + j] = avg_kernel(pix_i, pix_j, w, h, kernel_w, kernel_h);
}
}
}
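// Host side: read the input image (width, height, then width*height uchar4 pixels),
// bind it to a 2D texture, downscale it with the SSAA kernel, and write the result in the same binary format.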
int main() {
init_error_handling();
char *input_file_path = (char*)malloc(PATH_MAX * sizeof(char));
char *output_file_path = (char*)malloc(PATH_MAX * sizeof(char));
scanf("%s", input_file_path);
scanf("%s", output_file_path);
int new_width = 0, new_height = 0;
scanf("%d", &new_width);
scanf("%d", &new_height);
FILE *input_file;
if((input_file = fopen(input_file_path, "rb")) == NULL) {
printf("ERROR: can't open input file\n");
exit(0);
}
free(input_file_path);
int width = 0, height = 0;
fread(&width, sizeof(int), 1, input_file);
fread(&height, sizeof(int), 1, input_file);
if(width == 0 || height == 0)
return 0;
int ref_n_pixels = width * height;
int res_n_pixels = new_height * new_width;
int ref_size = sizeof(uchar4) * ref_n_pixels;
int res_size = sizeof(uchar4) * res_n_pixels;
uchar4 *ref = (uchar4*)malloc(ref_size);
fread(ref, sizeof(uchar4), ref_n_pixels, input_file);
fclose(input_file);
uchar4 *dev_res;
cudaArray *dev_ref;
cudaChannelFormatDesc ch = cudaCreateChannelDesc<uchar4>();
CHECK_CUDA_CALL_ERROR(cudaMallocArray(&dev_ref, &ch, width, height));
CHECK_CUDA_CALL_ERROR(cudaMemcpyToArray(dev_ref, 0, 0, ref, ref_size, cudaMemcpyHostToDevice));
tex.addressMode[0] = cudaAddressModeClamp;
tex.addressMode[1] = cudaAddressModeClamp;
tex.channelDesc = ch;
tex.filterMode = cudaFilterModePoint;
tex.normalized = false;
CHECK_CUDA_CALL_ERROR(cudaBindTextureToArray(tex, dev_ref, ch));
CHECK_CUDA_CALL_ERROR(cudaMalloc(&dev_res, res_size));
int grid_single_dim = (int)sqrt(GRID);
int block_single_dim = (int)sqrt(BLOCK);
dim3 gridDim(grid_single_dim, grid_single_dim);
dim3 blockDim(block_single_dim, block_single_dim);
MEASURE((SSAA<<<gridDim, blockDim>>>(dev_res, width, height, new_width, new_height)));
CHECK_CUDA_KERNEL_ERROR();
uchar4 *res = (uchar4*)realloc(ref, res_size);
CHECK_CUDA_CALL_ERROR(cudaMemcpy(res, dev_res, res_size, cudaMemcpyDeviceToHost));
CHECK_CUDA_CALL_ERROR(cudaUnbindTexture(tex));
CHECK_CUDA_CALL_ERROR(cudaFreeArray(dev_ref));
CHECK_CUDA_CALL_ERROR(cudaFree(dev_res));
FILE *output_file;
if((output_file = fopen(output_file_path, "wb")) == NULL) {
printf ("ERROR: can't open output file\n");
exit(0);
}
free(output_file_path);
fwrite(&new_width, sizeof(int), 1, output_file);
fwrite(&new_height, sizeof(int), 1, output_file);
fwrite(res, sizeof(uchar4), res_n_pixels, output_file);
fclose(output_file);
free(res);
}
|
ca3b7f537d8b202d8b60d3532a5b25ffdf118e5e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
__global__ void myexp(float* value)
{
value[threadIdx.x] = ::exp(value[threadIdx.x]);
}
|
ca3b7f537d8b202d8b60d3532a5b25ffdf118e5e.cu
|
#include <cmath>
__global__ void myexp(float* value)
{
value[threadIdx.x] = std::exp(value[threadIdx.x]);
}
|
9c06259b89beef463b1f99253c1c061fb4447ca8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "luaT.h"
#include "THH.h"
#include "THLogAdd.h" /* DEBUG: WTF */
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include "utils.c"
#include "ClassNLLCriterion.cu"
#include "HardTanh.cu"
#include "L1Cost.hip"
#include "Tanh.cu"
#include "Max.cu"
#include "Min.cu"
#include "LogSoftMax.cu"
#include "SoftMax.hip"
#include "TemporalConvolution.cu"
#include "TemporalMaxPooling.cu"
#include "SpatialConvolutionMM.cu"
#include "SpatialConvolutionMM_BHWD.cu"
#include "SpatialConvolutionCUDA.cu"
#include "SpatialSubSampling.hip"
#include "SpatialMaxPooling.cu"
#include "SpatialMaxPoolingCUDA.cu"
#include "SpatialAveragePooling.cu"
#include "SpatialAdaptiveMaxPooling.hip"
#include "Square.cu"
#include "Sqrt.hip"
#include "MultiMarginCriterion.hip"
#include "MSECriterion.hip"
#include "DistKLDivCriterion.cu"
#include "Threshold.cu"
#include "Sigmoid.hip"
#include "AbsCriterion.cu"
#include "Abs.hip"
#include "SoftPlus.cu"
#include "Exp.cu"
#include "SpatialUpSamplingNearest.cu"
#include "VolumetricConvolution.cu"
LUA_EXTERNC DLL_EXPORT int luaopen_libcunn(lua_State *L);
int luaopen_libcunn(lua_State *L)
{
lua_newtable(L);
cunn_ClassNLLCriterion_init(L);
cunn_Tanh_init(L);
cunn_Sigmoid_init(L);
cunn_Max_init(L);
cunn_Min_init(L);
cunn_HardTanh_init(L);
cunn_L1Cost_init(L);
cunn_LogSoftMax_init(L);
cunn_SoftMax_init(L);
cunn_TemporalConvolution_init(L);
cunn_TemporalMaxPooling_init(L);
cunn_SpatialConvolutionCUDA_init(L);
cunn_SpatialConvolutionMM_init(L);
cunn_SpatialConvolutionMM_BHWD_init(L);
cunn_SpatialMaxPooling_init(L);
cunn_SpatialMaxPoolingCUDA_init(L);
cunn_SpatialAdaptiveMaxPooling_init(L);
cunn_SpatialSubSampling_init(L);
cunn_SpatialAveragePooling_init(L);
cunn_MultiMarginCriterion_init(L);
cunn_Square_init(L);
cunn_Sqrt_init(L);
cunn_Threshold_init(L);
cunn_MSECriterion_init(L);
cunn_AbsCriterion_init(L);
cunn_DistKLDivCriterion_init(L);
cunn_Abs_init(L);
cunn_SoftPlus_init(L);
cunn_Exp_init(L);
cunn_SpatialUpSamplingNearest_init(L);
cunn_VolumetricConvolution_init(L);
return 1;
}
|
9c06259b89beef463b1f99253c1c061fb4447ca8.cu
|
#include "luaT.h"
#include "THC.h"
#include "THLogAdd.h" /* DEBUG: WTF */
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include "utils.c"
#include "ClassNLLCriterion.cu"
#include "HardTanh.cu"
#include "L1Cost.cu"
#include "Tanh.cu"
#include "Max.cu"
#include "Min.cu"
#include "LogSoftMax.cu"
#include "SoftMax.cu"
#include "TemporalConvolution.cu"
#include "TemporalMaxPooling.cu"
#include "SpatialConvolutionMM.cu"
#include "SpatialConvolutionMM_BHWD.cu"
#include "SpatialConvolutionCUDA.cu"
#include "SpatialSubSampling.cu"
#include "SpatialMaxPooling.cu"
#include "SpatialMaxPoolingCUDA.cu"
#include "SpatialAveragePooling.cu"
#include "SpatialAdaptiveMaxPooling.cu"
#include "Square.cu"
#include "Sqrt.cu"
#include "MultiMarginCriterion.cu"
#include "MSECriterion.cu"
#include "DistKLDivCriterion.cu"
#include "Threshold.cu"
#include "Sigmoid.cu"
#include "AbsCriterion.cu"
#include "Abs.cu"
#include "SoftPlus.cu"
#include "Exp.cu"
#include "SpatialUpSamplingNearest.cu"
#include "VolumetricConvolution.cu"
LUA_EXTERNC DLL_EXPORT int luaopen_libcunn(lua_State *L);
int luaopen_libcunn(lua_State *L)
{
lua_newtable(L);
cunn_ClassNLLCriterion_init(L);
cunn_Tanh_init(L);
cunn_Sigmoid_init(L);
cunn_Max_init(L);
cunn_Min_init(L);
cunn_HardTanh_init(L);
cunn_L1Cost_init(L);
cunn_LogSoftMax_init(L);
cunn_SoftMax_init(L);
cunn_TemporalConvolution_init(L);
cunn_TemporalMaxPooling_init(L);
cunn_SpatialConvolutionCUDA_init(L);
cunn_SpatialConvolutionMM_init(L);
cunn_SpatialConvolutionMM_BHWD_init(L);
cunn_SpatialMaxPooling_init(L);
cunn_SpatialMaxPoolingCUDA_init(L);
cunn_SpatialAdaptiveMaxPooling_init(L);
cunn_SpatialSubSampling_init(L);
cunn_SpatialAveragePooling_init(L);
cunn_MultiMarginCriterion_init(L);
cunn_Square_init(L);
cunn_Sqrt_init(L);
cunn_Threshold_init(L);
cunn_MSECriterion_init(L);
cunn_AbsCriterion_init(L);
cunn_DistKLDivCriterion_init(L);
cunn_Abs_init(L);
cunn_SoftPlus_init(L);
cunn_Exp_init(L);
cunn_SpatialUpSamplingNearest_init(L);
cunn_VolumetricConvolution_init(L);
return 1;
}
|
d1643d48f3537b7d782c200c41d726cf564e6e3f.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#if 1
#include "hip/hip_runtime.h"
#include "../common/book.h"
#include "../common/cpu_anim.h"
#define DIM 1000
#define PI 3.1415926535897932f
struct hipComplex {
float r;
float i;
__device__ hipComplex(float a, float b) : r(a), i(b) {}
__device__ float magnitude2(void) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r + a.r, i + a.i);
}
};
__device__ int julia(int x, int y) {
const float scale = 1.5;
float jx = scale * (float)(DIM / 2 - x) / (DIM / 2);
float jy = scale * (float)(DIM / 2 - y) / (DIM / 2);
hipComplex c(-0.8, 0.156);
hipComplex a(jx, jy);
int i = 0;
for (i = 0; i<200; i++) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
// the code above is copied unchanged from julia_gpu
__global__ void kernel( unsigned char *ptr, int ticks ) {
// map from blockIdx to pixel position
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
// now calculate the value at that position
int juliaValue = julia(x, y);
ptr[offset * 4 + 0] = (255-ticks*20)* juliaValue; // this is what changes the color: ticks increases with every frame, and multiplying it by 20 makes the shift more visible
ptr[offset * 4 + 1] = (100+ticks*20) *juliaValue;
ptr[offset * 4 + 2] = (25+ticks*20) * juliaValue; // the first three channels are RGB
ptr[offset * 4 + 3] = 255; // the last channel is the alpha (opacity)
}
struct DataBlock { // lets the CPU and GPU buffers reference each other
unsigned char *dev_bitmap;
CPUAnimBitmap *bitmap;
};
void generate_frame(DataBlock *d, int ticks) {
// dim3 blocks(DIM/16,DIM/16);
dim3 grid(DIM, DIM);
// dim3 threads(16,16);
//kernel<<<blocks,threads>>>( d->dev_bitmap, ticks );
kernel << <grid, 1 >> >(d->dev_bitmap,ticks++);
printf("%d", ticks); //ticks
HANDLE_ERROR( hipMemcpy( d->bitmap->get_ptr(),
d->dev_bitmap,
d->bitmap->image_size(),
hipMemcpyDeviceToHost ) );
/*HANDLE_ERROR(hipMemcpy(bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
hipMemcpyDeviceToHost));*/
}
// clean up memory allocated on the GPU
void cleanup( DataBlock *d ) {
HANDLE_ERROR( hipFree( d->dev_bitmap ) );
//HANDLE_ERROR(hipFree(dev_bitmap));
}
int main( void ) {
DataBlock data;
CPUAnimBitmap bitmap( DIM, DIM, &data );
data.bitmap = &bitmap; //////// the two structures point at each other so either side can be reached
HANDLE_ERROR( hipMalloc( (void**)&data.dev_bitmap, ////// the data struct holds only two pointers
bitmap.image_size() ) );
bitmap.anim_and_exit( (void (*)(void*,int))generate_frame, ///// function pointer: generate_frame renders each animation frame (the recolored Julia set)
(void (*)(void*))cleanup ); ///// cleanup frees the GPU buffer
}
#endif
|
d1643d48f3537b7d782c200c41d726cf564e6e3f.cu
|
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#if 1
#include "cuda.h"
#include "../common/book.h"
#include "../common/cpu_anim.h"
#define DIM 1000
#define PI 3.1415926535897932f
struct cuComplex {
float r;
float i;
__device__ cuComplex(float a, float b) : r(a), i(b) {}
__device__ float magnitude2(void) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r + a.r, i + a.i);
}
};
__device__ int julia(int x, int y) {
const float scale = 1.5;
float jx = scale * (float)(DIM / 2 - x) / (DIM / 2);
float jy = scale * (float)(DIM / 2 - y) / (DIM / 2);
cuComplex c(-0.8, 0.156);
cuComplex a(jx, jy);
int i = 0;
for (i = 0; i<200; i++) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
// the code above is copied unchanged from julia_gpu
__global__ void kernel( unsigned char *ptr, int ticks ) {
// map from blockIdx to pixel position
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
// now calculate the value at that position
int juliaValue = julia(x, y);
ptr[offset * 4 + 0] = (255-ticks*20)* juliaValue; // this is what changes the color: ticks increases with every frame, and multiplying it by 20 makes the shift more visible
ptr[offset * 4 + 1] = (100+ticks*20) *juliaValue;
ptr[offset * 4 + 2] = (25+ticks*20) * juliaValue; // the first three channels are RGB
ptr[offset * 4 + 3] = 255; // the last channel is the alpha (opacity)
}
struct DataBlock { // lets the CPU and GPU buffers reference each other
unsigned char *dev_bitmap;
CPUAnimBitmap *bitmap;
};
void generate_frame(DataBlock *d, int ticks) {
// dim3 blocks(DIM/16,DIM/16);
dim3 grid(DIM, DIM);
// dim3 threads(16,16);
//kernel<<<blocks,threads>>>( d->dev_bitmap, ticks );
kernel << <grid, 1 >> >(d->dev_bitmap,ticks++);
printf("%d", ticks); //此句在控制台显示ticks的值,可以把它注释掉
HANDLE_ERROR( cudaMemcpy( d->bitmap->get_ptr(),
d->dev_bitmap,
d->bitmap->image_size(),
cudaMemcpyDeviceToHost ) );
/*HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
cudaMemcpyDeviceToHost));*/
}
// clean up memory allocated on the GPU
void cleanup( DataBlock *d ) {
HANDLE_ERROR( cudaFree( d->dev_bitmap ) );
//HANDLE_ERROR(cudaFree(dev_bitmap));
}
int main( void ) {
DataBlock data;
CPUAnimBitmap bitmap( DIM, DIM, &data );
data.bitmap = &bitmap; //////// the two structures point at each other so either side can be reached
HANDLE_ERROR( cudaMalloc( (void**)&data.dev_bitmap, ////// the data struct holds only two pointers
bitmap.image_size() ) );
bitmap.anim_and_exit( (void (*)(void*,int))generate_frame, ///// function pointer: generate_frame renders each animation frame (the recolored Julia set)
(void (*)(void*))cleanup ); ///// cleanup frees the GPU buffer
}
#endif
|
32554bfe2de83ccb4337dbebde50729993d739a1.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* ga.c
*
* Created on: 12/12/2018
* Author: minterciso
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include "ga.h"
#include "kernels.h"
#include "common.h"
individual* create_population(hiprandGenerator_t gen)
{
// cuRand should be already initialized
int i,j;
int min=VAL_MIN;
int max=VAL_MAX;
unsigned int *h_random, *d_random;
size_t random_bytes = sizeof(unsigned int)*(LEN_SIZE*POP_SIZE);
individual *pop = NULL;
// Allocate random numbers array
if((h_random=(unsigned int*)malloc(random_bytes))==NULL)
{
perror("Unable to allocate host memory for random numbers.");
return NULL;
}
memset(h_random, '\0', random_bytes);
CHECK(hipMalloc((void**)&d_random, random_bytes));
CHECK(hipMemset(d_random,'\0', random_bytes));
// Generate random numbers on device
CHECK_RAND(hiprandGenerate(gen, d_random, LEN_SIZE*POP_SIZE));
CHECK(hipMemcpy(h_random, d_random, random_bytes, hipMemcpyDeviceToHost));
// Allocate memory for population
if((pop=(individual*)malloc(sizeof(individual)*POP_SIZE))==NULL)
{
perror("Unable to allocate host memory for population");
CHECK(hipFree(d_random));
free(h_random);
return NULL;
}
memset(pop, '\0', sizeof(individual)*POP_SIZE);
// Now finally create each string for each individual
unsigned int rnd_idx = 0;
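// Map each 32-bit random value into the [VAL_MIN, VAL_MAX] character range.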
for(i=0;i<POP_SIZE;i++)
{
for(j=0;j<LEN_SIZE;j++)
{
unsigned int rnd_val = (h_random[rnd_idx++] % (max-min+1)+min);
pop[i].s[j] = (char)(rnd_val);
}
}
// Free memory
free(h_random);
CHECK(hipFree(d_random));
return pop;
}
void destroy_population(individual *pop)
{
if(pop != NULL)
free(pop);
}
void xover_and_mutate(individual *pop, hiprandGenerator_t gen)
{
int i,j;
int min = VAL_MIN;
int max = VAL_MAX;
int qtdRandomInts = (POP_SIZE-KEEP_POP)/2 + (POP_SIZE-KEEP_POP);
int qtdRandomMut = KEEP_POP*LEN_SIZE;
int qtdRandomIntsPool = POP_SIZE*LEN_SIZE;
unsigned int *h_randomInts, *d_randomInts;
unsigned int *h_randomIntsPool, *d_randomIntsPool;
unsigned int *h_randomMut, *d_randomMut;
size_t s_randomInts = sizeof(unsigned int)*qtdRandomInts;
size_t s_randomMut = sizeof(unsigned int)*qtdRandomMut;
size_t s_randomIntsPool = sizeof(unsigned int)*qtdRandomIntsPool;
// Kill low performance individuals
for(i=KEEP_POP; i<POP_SIZE; i++)
{
pop[i].fitness = 0;
memset(pop[i].s, '\0', LEN_SIZE);
}
// Allocate memory
if((h_randomInts=(unsigned int*)malloc(s_randomInts))==NULL)
{
perror("Unable to allocate memory on host");
return;
}
if((h_randomMut=(unsigned int*)malloc(s_randomMut))==NULL)
{
perror("Unable to allocate memory on host");
free(h_randomInts);
return;
}
if((h_randomIntsPool=(unsigned int*)malloc(s_randomIntsPool))==NULL)
{
perror("Unable to allocate memory on host");
free(h_randomInts);
free(h_randomMut);
return;
}
memset(h_randomInts, '\0', s_randomInts);
memset(h_randomMut, '\0', s_randomMut);
memset(h_randomIntsPool, '\0', s_randomIntsPool);
CHECK(hipMalloc((void**)&d_randomInts, s_randomInts));
CHECK(hipMalloc((void**)&d_randomMut, s_randomMut));
CHECK(hipMalloc((void**)&d_randomIntsPool, s_randomIntsPool));
// Create random numbers on device
CHECK_RAND(hiprandGenerate(gen, d_randomInts, qtdRandomInts));
CHECK_RAND(hiprandGenerate(gen, d_randomIntsPool, qtdRandomIntsPool));
CHECK_RAND(hiprandGenerate(gen, d_randomMut, qtdRandomMut));
CHECK(hipMemcpy(h_randomInts, d_randomInts, s_randomInts, hipMemcpyDeviceToHost));
CHECK(hipMemcpy(h_randomMut, d_randomMut, s_randomMut, hipMemcpyDeviceToHost));
CHECK(hipMemcpy(h_randomIntsPool, d_randomIntsPool, s_randomIntsPool, hipMemcpyDeviceToHost));
// Now xover and mutate, based on the random numbers generated
int rndIntsIdx = 0;
int rndFloatsIdx = 0;
int rndIntsPoolIdx = 0;
float rndMut = 0.0;
for(i=KEEP_POP; i<POP_SIZE; i+=2)
{
unsigned int id1=h_randomInts[rndIntsIdx++] % KEEP_POP;
unsigned int id2=h_randomInts[rndIntsIdx++] % KEEP_POP;
individual *p1 = &pop[id1], *p2 = &pop[id2];
individual *s1 = &pop[i], *s2 = &pop[i+1];
unsigned int xp = h_randomInts[rndIntsIdx++]%LEN_SIZE;
memcpy(s1->s, p1->s, xp);
memcpy(s1->s + xp, p2->s + xp, (LEN_SIZE-xp));
memcpy(s2->s, p2->s, xp);
memcpy(s2->s + xp, p1->s +xp, (LEN_SIZE-xp));
// Mutate
for(j=0;j<LEN_SIZE;j++)
{
rndMut = (float)h_randomMut[rndFloatsIdx++]/(float)(RAND_MAX);
if(rndMut < PROB_MUT)
s1->s[j] = (char)(h_randomIntsPool[rndIntsPoolIdx++] % (max-min+1)+min);
rndMut = (float)h_randomMut[rndFloatsIdx++]/(float)(RAND_MAX);
if(rndMut < PROB_MUT)
s2->s[j] = (char)(h_randomIntsPool[rndIntsPoolIdx++] % (max-min+1)+min);
}
}
// Before stopping, zero the fitness of the best ones
for(i=0;i<KEEP_POP;i++)
{
pop[i].fitness=0;
}
free(h_randomInts);
free(h_randomMut);
free(h_randomIntsPool);
CHECK(hipFree(d_randomMut));
CHECK(hipFree(d_randomInts));
CHECK(hipFree(d_randomIntsPool));
}
|
32554bfe2de83ccb4337dbebde50729993d739a1.cu
|
/*
* ga.c
*
* Created on: 12/12/2018
* Author: minterciso
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
#include "ga.h"
#include "kernels.h"
#include "common.h"
individual* create_population(curandGenerator_t gen)
{
// cuRand should be already initialized
int i,j;
int min=VAL_MIN;
int max=VAL_MAX;
unsigned int *h_random, *d_random;
size_t random_bytes = sizeof(unsigned int)*(LEN_SIZE*POP_SIZE);
individual *pop = NULL;
// Allocate random numbers array
if((h_random=(unsigned int*)malloc(random_bytes))==NULL)
{
perror("Unable to allocate host memory for random numbers.");
return NULL;
}
memset(h_random, '\0', random_bytes);
CHECK(cudaMalloc((void**)&d_random, random_bytes));
CHECK(cudaMemset(d_random,'\0', random_bytes));
// Generate random numbers on device
CHECK_RAND(curandGenerate(gen, d_random, LEN_SIZE*POP_SIZE));
CHECK(cudaMemcpy(h_random, d_random, random_bytes, cudaMemcpyDeviceToHost));
// Allocate memory for population
if((pop=(individual*)malloc(sizeof(individual)*POP_SIZE))==NULL)
{
perror("Unable to allocate host memory for population");
CHECK(cudaFree(d_random));
free(h_random);
return NULL;
}
memset(pop, '\0', sizeof(individual)*POP_SIZE);
// Now finally create each string for each individual
unsigned int rnd_idx = 0;
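// Map each 32-bit random value into the [VAL_MIN, VAL_MAX] character range.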
for(i=0;i<POP_SIZE;i++)
{
for(j=0;j<LEN_SIZE;j++)
{
unsigned int rnd_val = (h_random[rnd_idx++] % (max-min+1)+min);
pop[i].s[j] = (char)(rnd_val);
}
}
// Free memory
free(h_random);
CHECK(cudaFree(d_random));
return pop;
}
void destroy_population(individual *pop)
{
if(pop != NULL)
free(pop);
}
void xover_and_mutate(individual *pop, curandGenerator_t gen)
{
int i,j;
int min = VAL_MIN;
int max = VAL_MAX;
int qtdRandomInts = (POP_SIZE-KEEP_POP)/2 + (POP_SIZE-KEEP_POP);
int qtdRandomMut = KEEP_POP*LEN_SIZE;
int qtdRandomIntsPool = POP_SIZE*LEN_SIZE;
unsigned int *h_randomInts, *d_randomInts;
unsigned int *h_randomIntsPool, *d_randomIntsPool;
unsigned int *h_randomMut, *d_randomMut;
size_t s_randomInts = sizeof(unsigned int)*qtdRandomInts;
size_t s_randomMut = sizeof(unsigned int)*qtdRandomMut;
size_t s_randomIntsPool = sizeof(unsigned int)*qtdRandomIntsPool;
// Kill low performance individuals
for(i=KEEP_POP; i<POP_SIZE; i++)
{
pop[i].fitness = 0;
memset(pop[i].s, '\0', LEN_SIZE);
}
// Allocate memory
if((h_randomInts=(unsigned int*)malloc(s_randomInts))==NULL)
{
perror("Unable to allocate memory on host");
return;
}
if((h_randomMut=(unsigned int*)malloc(s_randomMut))==NULL)
{
perror("Unable to allocate memory on host");
free(h_randomInts);
return;
}
if((h_randomIntsPool=(unsigned int*)malloc(s_randomIntsPool))==NULL)
{
perror("Unable to allocate memory on host");
free(h_randomInts);
free(h_randomMut);
return;
}
memset(h_randomInts, '\0', s_randomInts);
memset(h_randomMut, '\0', s_randomMut);
memset(h_randomIntsPool, '\0', s_randomIntsPool);
CHECK(cudaMalloc((void**)&d_randomInts, s_randomInts));
CHECK(cudaMalloc((void**)&d_randomMut, s_randomMut));
CHECK(cudaMalloc((void**)&d_randomIntsPool, s_randomIntsPool));
// Create random numbers on device
CHECK_RAND(curandGenerate(gen, d_randomInts, qtdRandomInts));
CHECK_RAND(curandGenerate(gen, d_randomIntsPool, qtdRandomIntsPool));
CHECK_RAND(curandGenerate(gen, d_randomMut, qtdRandomMut));
CHECK(cudaMemcpy(h_randomInts, d_randomInts, s_randomInts, cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(h_randomMut, d_randomMut, s_randomMut, cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(h_randomIntsPool, d_randomIntsPool, s_randomIntsPool, cudaMemcpyDeviceToHost));
// Now xover and mutate, based on the random numbers generated
int rndIntsIdx = 0;
int rndFloatsIdx = 0;
int rndIntsPoolIdx = 0;
float rndMut = 0.0;
for(i=KEEP_POP; i<POP_SIZE; i+=2)
{
unsigned int id1=h_randomInts[rndIntsIdx++] % KEEP_POP;
unsigned int id2=h_randomInts[rndIntsIdx++] % KEEP_POP;
individual *p1 = &pop[id1], *p2 = &pop[id2];
individual *s1 = &pop[i], *s2 = &pop[i+1];
unsigned int xp = h_randomInts[rndIntsIdx++]%LEN_SIZE;
memcpy(s1->s, p1->s, xp);
memcpy(s1->s + xp, p2->s + xp, (LEN_SIZE-xp));
memcpy(s2->s, p2->s, xp);
memcpy(s2->s + xp, p1->s +xp, (LEN_SIZE-xp));
// Mutate
for(j=0;j<LEN_SIZE;j++)
{
rndMut = (float)h_randomMut[rndFloatsIdx++]/(float)(RAND_MAX);
if(rndMut < PROB_MUT)
s1->s[j] = (char)(h_randomIntsPool[rndIntsPoolIdx++] % (max-min+1)+min);
rndMut = (float)h_randomMut[rndFloatsIdx++]/(float)(RAND_MAX);
if(rndMut < PROB_MUT)
s2->s[j] = (char)(h_randomIntsPool[rndIntsPoolIdx++] % (max-min+1)+min);
}
}
// Before stopping, zero the fitness of the best ones
for(i=0;i<KEEP_POP;i++)
{
pop[i].fitness=0;
}
free(h_randomInts);
free(h_randomMut);
free(h_randomIntsPool);
CHECK(cudaFree(d_randomMut));
CHECK(cudaFree(d_randomInts));
CHECK(cudaFree(d_randomIntsPool));
}
|
20d4874544c273a20be930d0fc3c75c80d47ea71.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "maxtranParallelShare.h"
// using shared memory
__global__ void matTran(int result_row_size, int result_col_size, float* result, int input_row_size, int input_col_size, float* matrix){
// each row is a block
// size of row (vert length) is block dim
extern __shared__ float shared[];
shared[threadIdx.x] = matrix[blockIdx.x * blockDim.x + threadIdx.x];
int current_row = blockIdx.x;
int current_col = threadIdx.x;
int current_vector_format = current_row * input_row_size + current_col ;
int destination_row = current_col;
int destination_col = current_row;
int destination_vector_format = destination_row * result_row_size + destination_col;
//make sure all threads have loaded into shared memory and computed their proper indices.
__syncthreads();
//now transfer shared mem into proper location on global mem array
result[destination_vector_format] = shared[current_col];
}
void matTranSharedParallel(float* matrix, float *result, int row, int col){
float *d_matrix, *d_result;
hipMalloc( &d_matrix, row * col * sizeof(float));
hipMalloc( &d_result, row * col * sizeof(float));
hipMemcpy(d_matrix, matrix, col * row * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_result, result, col * row * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(matTran, dim3(row), dim3(col), col * sizeof(float), 0, row, col, d_result, row, col, d_matrix);
hipMemcpy(result, d_result, col * row * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_matrix);
hipFree(d_result);
}
|
20d4874544c273a20be930d0fc3c75c80d47ea71.cu
|
#include "maxtranParallelShare.h"
// using shared memory
__global__ void matTran(int result_row_size, int result_col_size, float* result, int input_row_size, int input_col_size, float* matrix){
// each row is a block
// size of row (vert length) is block dim
extern __shared__ float shared[];
shared[threadIdx.x] = matrix[blockIdx.x * blockDim.x + threadIdx.x];
int current_row = blockIdx.x;
int current_col = threadIdx.x;
int current_vector_format = current_row * input_row_size + current_col ;
int destination_row = current_col;
int destination_col = current_row;
int destination_vector_format = destination_row * result_row_size + destination_col;
//make sure all threads have loaded into shared memory and computed their proper indices.
__syncthreads();
//now transfer shared mem into proper location on global mem array
result[destination_vector_format] = shared[current_col];
}
void matTranSharedParallel(float* matrix, float *result, int row, int col){
float *d_matrix, *d_result;
cudaMalloc( &d_matrix, row * col * sizeof(float));
cudaMalloc( &d_result, row * col * sizeof(float));
cudaMemcpy(d_matrix, matrix, col * row * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_result, result, col * row * sizeof(float), cudaMemcpyHostToDevice);
matTran <<<row, col, col * sizeof(float)>>> (row, col, d_result, row, col, d_matrix);
cudaMemcpy(result, d_result, col * row * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_matrix);
cudaFree(d_result);
}
|