hip_filename (string, lengths 5–84) | hip_content (string, lengths 79–9.69M) | cuda_filename (string, lengths 4–83) | cuda_content (string, lengths 19–9.69M) |
---|---|---|---|
ab16843dbde3dab417b9738a0f9993b21e78d187.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void MatrixMulVarKernel(float* M, float* N, float* P, int widthAHeightB, int heightA, int widthB) {
int Row = blockIdx.y*blockDim.y+threadIdx.y;// Calculate the row index of the P element and M
int Col = blockIdx.x*blockDim.x+threadIdx.x;// Calculate the column index of P and N
if ((Row < heightA) && (Col < widthB)) {
float Pvalue = 0;
for (int k = 0; k < widthAHeightB; ++k) {
Pvalue += M[Row*widthAHeightB+k]*N[k*widthB+Col];// each thread computes one element of the block sub-matrix
}
P[Row*widthB+Col] = Pvalue;
}
} | ab16843dbde3dab417b9738a0f9993b21e78d187.cu | #include "includes.h"
__global__ void MatrixMulVarKernel(float* M, float* N, float* P, int widthAHeightB, int heightA, int widthB) {
int Row = blockIdx.y*blockDim.y+threadIdx.y;// Calculate the row index of the P element and M
int Col = blockIdx.x*blockDim.x+threadIdx.x;// Calculate the column index of P and N
if ((Row < heightA) && (Col < widthB)) {
float Pvalue = 0;
for (int k = 0; k < widthAHeightB; ++k) {
Pvalue += M[Row*widthAHeightB+k]*N[k*widthB+Col];// each thread computes one element of the block sub-matrix
}
P[Row*widthB+Col] = Pvalue;
}
} |
ea21e2444b582b6db5ace65698d28e399294826c.hip | // !!! This is a file automatically generated by hipify!!!
#define CUB_STDERR // print CUDA runtime errors to console
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include <stdio.h>
#include <iostream> // needed for the std::cout output below
using namespace cub;
CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory
int main(int argc, char **argv) {
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
unsigned int n = atoi(argv[1]);
int *h_in = new int[n];
for (unsigned int i = 0; i < n; i++)
h_in[i] = (static_cast<int>(rand()) / static_cast<int>(RAND_MAX / 2));
// Set up device arrays
int *d_in = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void **)&d_in, sizeof(int) * n));
// Initialize device input
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(int) * n, hipMemcpyHostToDevice));
// Setup device output array
int *d_sum = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void **)&d_sum, sizeof(int) * 1));
// Request and allocate temporary storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
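// The first DeviceReduce::Sum call below is a size query: with d_temp_storage == NULL it only
// writes the required byte count into temp_storage_bytes and performs no reduction; the actual
// reduction runs in the second call, after the temporary buffer has been allocated.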
CubDebugExit(
DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_sum, n));
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
hipEventRecord(start);
// Do the actual reduce operation
CubDebugExit(
DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_sum, n));
hipEventRecord(stop);
hipEventSynchronize(stop);
float ms;
hipEventElapsedTime(&ms, start, stop);
int gpu_sum;
CubDebugExit(
hipMemcpy(&gpu_sum, d_sum, sizeof(int) * 1, hipMemcpyDeviceToHost));
std::cout << gpu_sum << std::endl;
std::cout << ms << std::endl;
// Cleanup
if (d_in)
CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_sum)
CubDebugExit(g_allocator.DeviceFree(d_sum));
if (d_temp_storage)
CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
return 0;
}
| ea21e2444b582b6db5ace65698d28e399294826c.cu | #define CUB_STDERR // print CUDA runtime errors to console
#include <cub/device/device_reduce.cuh>
#include <cub/util_allocator.cuh>
#include <stdio.h>
#include <iostream> // needed for the std::cout output below
using namespace cub;
CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory
int main(int argc, char **argv) {
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
unsigned int n = atoi(argv[1]);
int *h_in = new int[n];
for (unsigned int i = 0; i < n; i++)
h_in[i] = (static_cast<int>(rand()) / static_cast<int>(RAND_MAX / 2));
// Set up device arrays
int *d_in = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void **)&d_in, sizeof(int) * n));
// Initialize device input
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(int) * n, cudaMemcpyHostToDevice));
// Setup device output array
int *d_sum = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void **)&d_sum, sizeof(int) * 1));
// Request and allocate temporary storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
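// The first DeviceReduce::Sum call below is a size query: with d_temp_storage == NULL it only
// writes the required byte count into temp_storage_bytes and performs no reduction; the actual
// reduction runs in the second call, after the temporary buffer has been allocated.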
CubDebugExit(
DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_sum, n));
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
cudaEventRecord(start);
// Do the actual reduce operation
CubDebugExit(
DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_sum, n));
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float ms;
cudaEventElapsedTime(&ms, start, stop);
int gpu_sum;
CubDebugExit(
cudaMemcpy(&gpu_sum, d_sum, sizeof(int) * 1, cudaMemcpyDeviceToHost));
std::cout << gpu_sum << std::endl;
std::cout << ms << std::endl;
// Cleanup
if (d_in)
CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_sum)
CubDebugExit(g_allocator.DeviceFree(d_sum));
if (d_temp_storage)
CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
return 0;
}
|
memspace.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Using different memory spaces in CUDA
#include <stdio.h>
/**********************
* using local memory *
**********************/
// a __device__ or __global__ function runs on the GPU
__global__ void use_local_memory_GPU(float in)
{
float f; // variable "f" is in local memory and private to each thread
f = in; // parameter "in" is in local memory and private to each thread
// ... real code would presumably do other stuff here ...
}
/**********************
* using global memory *
**********************/
// a __global__ function runs on the GPU & can be called from host
__global__ void use_global_memory_GPU(float *array)
{
// "array" is a pointer into global memory on the device
array[threadIdx.x] = 2.0f * (float) threadIdx.x;
}
/**********************
* using shared memory *
**********************/
// (for clarity, hardcoding 128 threads/elements and omitting
// out-of-bounds checks)
__global__ void use_shared_memory_GPU(float *array)
{
// local variables, private to each thread
int i, index = threadIdx.x;
float average, sum = 0.0f;
// __shared__ variables are visible to all threads in the thread
// block
// and have the same lifetime as the thread block
__shared__ float sh_arr[128];
// copy data from "array" in global memory to sh_arr in shared
// memory.
// here, each thread is responsible for copying a single element.
sh_arr[index] = array[index];
__syncthreads(); // ensure all the writes to shared memory have completed
// now, sh_arr is fully populated. Let's find the average of all
// previous elements
for (i=0; i<index; i++) { sum += sh_arr[i]; }
average = sum / (index + 1.0f);
// if array[index] is greater than the average of
// array[0..index-1], replace with average.
// since array[] is in global memory, this change will be seen by
// the host (and potentially
// other thread blocks, if any)
if (array[index] > average) { array[index] = average; }
// the following code has NO EFFECT: it modifies shared memory,
// but
// the resulting modified data is never copied back to global
// memory
// and vanishes when the thread block completes
sh_arr[index] = 3.14;
}
int main(int argc, char **argv)
{
/*
* First, call a kernel that shows using local memory
*/
hipLaunchKernelGGL(( use_local_memory_GPU), dim3(1), dim3(128), 0, 0, 2.0f);
/*
* Next, call a kernel that shows using global memory
*/
float h_arr[128]; // convention: h_ variables live on host
float *d_arr; // convention: d_ variables live on device (GPU global mem)
// allocate global memory on the device, place result in "d_arr"
hipMalloc((void **) &d_arr, sizeof(float) * 128);
// now copy data from host memory "h_arr" to device memory "d_arr"
hipMemcpy((void *)d_arr, (void *)h_arr, sizeof(float) * 128,hipMemcpyHostToDevice);
// launch the kernel (1 block of 128 threads)
hipLaunchKernelGGL(( use_global_memory_GPU), dim3(1), dim3(128), 0, 0, d_arr); // modifies the contents of array at d_arr
// copy the modified array back to the host, overwriting contents
// of h_arr
hipMemcpy((void *)h_arr, (void *)d_arr, sizeof(float) * 128,hipMemcpyDeviceToHost);
// ... do other stuff ...
/*
* Next, call a kernel that shows using shared memory
*/
// as before, pass in a pointer to data in global memory
hipLaunchKernelGGL(( use_shared_memory_GPU), dim3(1), dim3(128), 0, 0, d_arr);
// copy the modified array back to the host
hipMemcpy((void *)h_arr, (void *)d_arr, sizeof(float) * 128,hipMemcpyDeviceToHost);
// ... do other stuff ...
return 0;
}
| memspace.cu | // Using different memory spaces in CUDA
#include <stdio.h>
/**********************
* using local memory *
**********************/
// a __device__ or __global__ function runs on the GPU
__global__ void use_local_memory_GPU(float in)
{
float f; // variable "f" is in local memory and private to each thread
f = in; // parameter "in" is in local memory and private to each thread
// ... real code would presumably do other stuff here ...
}
/**********************
* using global memory *
**********************/
// a __global__ function runs on the GPU & can be called from host
__global__ void use_global_memory_GPU(float *array)
{
// "array" is a pointer into global memory on the device
array[threadIdx.x] = 2.0f * (float) threadIdx.x;
}
/**********************
* using shared memory *
**********************/
// (for clarity, hardcoding 128 threads/elements and omitting
// out-of-bounds checks)
__global__ void use_shared_memory_GPU(float *array)
{
// local variables, private to each thread
int i, index = threadIdx.x;
float average, sum = 0.0f;
// __shared__ variables are visible to all threads in the thread
// block
// and have the same lifetime as the thread block
__shared__ float sh_arr[128];
// copy data from "array" in global memory to sh_arr in shared
// memory.
// here, each thread is responsible for copying a single element.
sh_arr[index] = array[index];
__syncthreads(); // ensure all the writes to shared memory have completed
// now, sh_arr is fully populated. Let's find the average of all
// previous elements
for (i=0; i<index; i++) { sum += sh_arr[i]; }
average = sum / (index + 1.0f);
// if array[index] is greater than the average of
// array[0..index-1], replace with average.
// since array[] is in global memory, this change will be seen by
// the host (and potentially
// other thread blocks, if any)
if (array[index] > average) { array[index] = average; }
// the following code has NO EFFECT: it modifies shared memory,
// but
// the resulting modified data is never copied back to global
// memory
// and vanishes when the thread block completes
sh_arr[index] = 3.14;
}
int main(int argc, char **argv)
{
/*
* First, call a kernel that shows using local memory
*/
use_local_memory_GPU<<<1, 128>>>(2.0f);
/*
* Next, call a kernel that shows using global memory
*/
float h_arr[128]; // convention: h_ variables live on host
float *d_arr; // convention: d_ variables live on device (GPU global mem)
// allocate global memory on the device, place result in "d_arr"
cudaMalloc((void **) &d_arr, sizeof(float) * 128);
// now copy data from host memory "h_arr" to device memory "d_arr"
cudaMemcpy((void *)d_arr, (void *)h_arr, sizeof(float) * 128,cudaMemcpyHostToDevice);
// launch the kernel (1 block of 128 threads)
use_global_memory_GPU<<<1, 128>>>(d_arr); // modifies the contents of array at d_arr
// copy the modified array back to the host, overwriting contents
// of h_arr
cudaMemcpy((void *)h_arr, (void *)d_arr, sizeof(float) * 128,cudaMemcpyDeviceToHost);
// ... do other stuff ...
/*
* Next, call a kernel that shows using shared memory
*/
// as before, pass in a pointer to data in global memory
use_shared_memory_GPU<<<1, 128>>>(d_arr);
// copy the modified array back to the host
cudaMemcpy((void *)h_arr, (void *)d_arr, sizeof(float) * 128,cudaMemcpyDeviceToHost);
// ... do other stuff ...
return 0;
}
|
1d0eb6b5f66650eb34508cca5c7f2918cb8e612d.hip | // !!! This is a file automatically generated by hipify!!!
#include "linux_helper.h"
#include "Emitter.cuh"
#include "../Graphics.h"
#include "../Engine.h"
#include "../Camera.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include <device_launch_parameters.h>
#include <iostream>
enum SHADER_ATTRIBUTES_IDX {
POSITION = 0,
SIZE = 1,
TIME = 2,
NUM_SHADER_ATTRIBUTES = 3
};
static std::string ShaderAttributes[NUM_SHADER_ATTRIBUTES] = {
"positionIn",
"sizeIn",
"timeIn",
};
__global__ void initRand(hiprandState_t *_randstate) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// From CURAND library guide
// Each thread gets same seed, a different sequence number
// and no offset.
hiprand_init(2345, tid, 0, &_randstate[tid]);
}
__global__ void kill(Emitter::EmitterParams _p, float *_time) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < _p.numParticles_) {
_time[tid] = 0.0f;
tid += blockDim.x * gridDim.x;
}
}
__global__ void init(Emitter::EmitterParams _p,
float *_time,
float *_pos,
float *_acc,
float *_vel,
float *_size) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < _p.numParticles_) {
_time[tid] = 1.0f;
_pos[3*tid+0] = _p.startPos_[0];
_pos[3*tid+1] = _p.startPos_[1];
_pos[3*tid+2] = _p.startPos_[2];
_acc[3*tid+0] = _p.startAcc_[0];
_acc[3*tid+1] = _p.startAcc_[1];
_acc[3*tid+2] = _p.startAcc_[2];
_vel[3*tid+0] = _p.startVel_[0];
_vel[3*tid+1] = _p.startVel_[1];
_vel[3*tid+2] = _p.startVel_[2];
_size[tid] = _p.pointSize_;
tid += blockDim.x * gridDim.x;
}
}
__global__ void newParticle(Emitter::EmitterParams _p,
float *_time,
float *_pos,
float *_acc,
float *_vel,
float *_size,
unsigned int _index,
hiprandState_t *_randstate) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int index;
int limit;
switch (_p.emitterType_) {
case Emitter::EMITTER_STREAM:
index = _index; // only add one new particle
limit = _p.numParticles_;
break;
case Emitter::EMITTER_BURST:
index = tid; // add several particles
if (_p.burstSize_ <= _p.numParticles_) limit = _p.burstSize_;
else limit = _p.numParticles_;
break;
}
while (index < limit) {
// draw normally distributed random offsets: three for velocity, three for position, one for time
float vx_offset = 2.f * ( hiprand_normal(&_randstate[tid]) - 0.5f );
float vy_offset = 2.f * ( hiprand_normal(&_randstate[tid]) - 0.5f );
float vz_offset = 2.f * ( hiprand_normal(&_randstate[tid]) - 0.5f );
float px_offset = 2.f * ( hiprand_normal(&_randstate[tid]) - 0.5f );
float py_offset = 2.f * ( hiprand_normal(&_randstate[tid]) - 0.5f );
float pz_offset = 2.f * ( hiprand_normal(&_randstate[tid]) - 0.5f );
float t_offset = hiprand_normal(&_randstate[tid]);
_time[index] = 1.0f + t_offset*0.01;
_pos[3*index+0] = _p.startPos_[0] + px_offset * _p.posRandWeight_;
_pos[3*index+1] = _p.startPos_[1] + py_offset * _p.posRandWeight_;
_pos[3*index+2] = _p.startPos_[2] + pz_offset * _p.posRandWeight_;
_acc[3*index+0] = _p.startAcc_[0];
_acc[3*index+1] = _p.startAcc_[1];
_acc[3*index+2] = _p.startAcc_[2];
_vel[3*index+0] = _p.startVel_[0] + vx_offset * _p.velRandWeight_;
_vel[3*index+1] = _p.startVel_[1] + vy_offset * _p.velRandWeight_;
_vel[3*index+2] = _p.startVel_[2] + vz_offset * _p.velRandWeight_;
_size[index] = _p.pointSize_;
// only run once if stream (only add one at a time)
if (_p.emitterType_ == Emitter::EMITTER_STREAM) break;
index += blockDim.x * gridDim.x;
}
}
__global__ void integrate(Emitter::EmitterParams _p,
float *_time,
float *_pos,
float *_acc,
float *_vel,
float *_size,
float _dt) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < _p.numParticles_) {
if (_time[tid] > 0.0) {
// subtract elapsed time
_time[tid] -= (1.f/_p.lifeTime_)*_dt;
_vel[3*tid+0] += _dt * _acc[3*tid+0];
_vel[3*tid+1] += _dt * _acc[3*tid+1];
_vel[3*tid+2] += _dt * _acc[3*tid+2];
_pos[3*tid+0] += _dt * _vel[3*tid+0];
_pos[3*tid+1] += _dt * _vel[3*tid+1];
_pos[3*tid+2] += _dt * _vel[3*tid+2];
_size[tid] *= _p.growthFactor_;
}
tid += blockDim.x * gridDim.x;
}
}
Emitter::Emitter(unsigned int _numParticles, ShaderData*_sd) : shaderData_(_sd)
{
// set standard values
params_.numParticles_ = _numParticles;
params_.burstSize_ = _numParticles;
params_.emitterType_ = Emitter::EMITTER_STREAM;
params_.growthFactor_ = 1.f;
params_.lifeTime_ = 100.f;
params_.mass_ = 1.f;
params_.pointSize_ = 30.f;
params_.posRandWeight_ = 0.f;
params_.rate_ = 0.001f;
params_.startAcc_[0] = 0.f;
params_.startAcc_[1] = 0.f;
params_.startAcc_[2] = 0.f;
params_.startPos_[0] = 0.f;
params_.startPos_[1] = 0.f;
params_.startPos_[2] = 0.f;
params_.startVel_[0] = 0.f;
params_.startVel_[1] = 1.f;
params_.startVel_[2] = 0.f;
params_.velRandWeight_ = 0.f;
params_.blendMode_ = Emitter::BLEND_FIRE;
blocks_ = threads_ = 128;
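// The launch configuration is fixed at 128 blocks of 128 threads; this is safe for any
// particle count because the kernels above iterate with a grid-stride loop
// (tid += blockDim.x * gridDim.x) until all numParticles_ elements are covered.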
// allocate device memory
hipMalloc((void**)&d_time_, sizeof(float)*_numParticles);
hipMalloc((void**)&d_pos_, sizeof(float)*3*_numParticles);
hipMalloc((void**)&d_acc_, sizeof(float)*3*_numParticles);
hipMalloc((void**)&d_vel_, sizeof(float)*3*_numParticles);
hipMalloc((void**)&d_size_, sizeof(float)*_numParticles);
// for random states
hipMalloc((void**)&d_randstate_, sizeof(hiprandState_t)*blocks_*threads_);
// init
init CUDA_KERNEL_DIM(blocks_,threads_)(params_,
d_time_,
d_pos_,
d_acc_,
d_vel_,
d_size_);
initRand CUDA_KERNEL_DIM(blocks_, threads_) (d_randstate_);
// first particle goes in the first slot
nextSlot_ = 0;
// reset time
nextEmission_ = params_.rate_;
// generate VBOs
std::string name("nat javla namn, todo");
Graphics::instance().buffersNew(name, VAO_, vboPos_, vboSize_, vboTime_);
Graphics::instance().geometryIs(
vboPos_,
vboSize_,
vboTime_,
_numParticles,
VBO_DYNAMIC);
const int id = shaderData_->shaderID();
Graphics & g = Graphics::instance();
int posLoc = g.shaderAttribLoc(id , ShaderAttributes[POSITION]);
int sizeLoc = g.shaderAttribLoc(id , ShaderAttributes[SIZE]);
int timeLoc = g.shaderAttribLoc(id , ShaderAttributes[TIME]);
unsigned int sID = shaderData_->shaderID();
g.bindGeometry(sID, VAO_, vboPos_, 3, 0, posLoc, 0);
g.bindGeometry(sID, VAO_, vboSize_, 1, 0, sizeLoc, 0);
g.bindGeometry(sID, VAO_, vboTime_, 1, 0, timeLoc, 0);
hipGLRegisterBufferObject(vboPos_);
hipGLRegisterBufferObject(vboSize_);
hipGLRegisterBufferObject(vboTime_);
}
Emitter::~Emitter() {
std::cout << "~Emitter()" << std::endl;
hipGLUnmapBufferObject(vboPos_);
hipGLUnmapBufferObject(vboSize_);
hipGLUnmapBufferObject(vboTime_);
}
void Emitter::display() const
{
Matrix4 * modelView = shaderData_->stdMatrix4Data(MODELVIEW);
*modelView = Engine::instance().camera()->viewMtx();
bool additive = params_.blendMode_ == BLEND_FIRE ? true : false;
Graphics::instance().drawArrays(VAO_, params_.numParticles_,
shaderData_, additive);
}
void Emitter::burst() {
if (params_.emitterType_ != Emitter::EMITTER_BURST) return;
//std::cout << "bursting" << std::endl;
hipGLMapBufferObject__((void**)&d_pos_, vboPos_);
hipGLMapBufferObject__((void**)&d_size_, vboSize_);
hipGLMapBufferObject__((void**)&d_time_, vboTime_);
newParticle CUDA_KERNEL_DIM(blocks_,threads_)(params_,
d_time_,
d_pos_,
d_acc_,
d_vel_,
d_size_,
0,
d_randstate_);
hipGLUnmapBufferObject(vboPos_);
hipGLUnmapBufferObject(vboSize_);
hipGLUnmapBufferObject(vboTime_);
}
void Emitter::update(float _dt) {
hipGLMapBufferObject__((void**)&d_pos_, vboPos_);
hipGLMapBufferObject__((void**)&d_size_, vboSize_);
hipGLMapBufferObject__((void**)&d_time_, vboTime_);
// only care about new emissions if it's a stream
if (params_.emitterType_ == Emitter::EMITTER_STREAM) {
// count off elapsed time
nextEmission_ -= _dt;
// std::cout << "Next emission: " << nextEmission_ << std::endl;
//std::cout << "Nect slot: " << nextSlot_ << std::endl;
if (nextEmission_ < 0.0) {
// calculate how many particles we should emit
int numNewParticles = (int)(-nextEmission_/params_.rate_);
// reset time for next emission
nextEmission_ += numNewParticles*params_.rate_;
nextEmission_ += params_.rate_;
// emit new particles to make up for any overlap in elapsed time
do {
// emit a particle
newParticle CUDA_KERNEL_DIM(1,1) (params_,
d_time_,
d_pos_,
d_acc_,
d_vel_,
d_size_,
nextSlot_,
d_randstate_);
// jump forward one slot
nextSlot_++;
if (nextSlot_ == params_.numParticles_) nextSlot_ = 0;
numNewParticles--;
} while (numNewParticles > 0);
} // if nextemission
} // if stream
// update all the particles
integrate CUDA_KERNEL_DIM(blocks_,threads_) (params_,
d_time_,
d_pos_,
d_acc_,
d_vel_,
d_size_,
_dt);
hipGLUnmapBufferObject(vboPos_);
hipGLUnmapBufferObject(vboSize_);
hipGLUnmapBufferObject(vboTime_);
//copyPosToHostAndPrint();
}
void Emitter::deactivate() {
hipGLMapBufferObject__((void**)&d_pos_, vboPos_);
hipGLMapBufferObject__((void**)&d_size_, vboSize_);
hipGLMapBufferObject__((void**)&d_time_, vboTime_);
kill CUDA_KERNEL_DIM(blocks_,threads_)(params_, d_time_);
hipGLUnmapBufferObject(vboPos_);
hipGLUnmapBufferObject(vboSize_);
hipGLUnmapBufferObject(vboTime_);
}
void Emitter::posIs(Vector3 _pos) {
params_.startPos_[0] = _pos.x;
params_.startPos_[1] = _pos.y;
params_.startPos_[2] = _pos.z;
}
void Emitter::accIs(Vector3 _acc) {
params_.startAcc_[0] = _acc.x;
params_.startAcc_[1] = _acc.y;
params_.startAcc_[2] = _acc.z;
}
void Emitter::velIs(Vector3 _vel) {
params_.startVel_[0] = _vel.x;
params_.startVel_[1] = _vel.y;
params_.startVel_[2] = _vel.z;
}
void Emitter::rateIs(float _rate) {
params_.rate_ = _rate;
}
void Emitter::massIs(float _mass) {
params_.mass_ = _mass;
}
void Emitter::burstSizeIs(unsigned int _burstSize) {
params_.burstSize_ = _burstSize;
}
void Emitter::lifeTimeIs(float _lifeTime) {
params_.lifeTime_ = _lifeTime;
}
void Emitter::typeIs(Type _emitterType) {
params_.emitterType_ = _emitterType;
}
void Emitter::pointSizeIs(float _size) {
params_.pointSize_ = _size;
}
void Emitter::growthFactorIs(float _growthFactor) {
params_.growthFactor_ = _growthFactor;
}
void Emitter::velRandWeightIs(float _velRandWeight) {
params_.velRandWeight_ = _velRandWeight;
}
void Emitter::posRandWeightIs(float _posRandWeight) {
params_.posRandWeight_ = _posRandWeight;
}
void Emitter::shaderDataIs(ShaderData * _shaderData) {
shaderData_ = _shaderData;
}
void Emitter::blendModeIs(BlendMode _blendMode) {
params_.blendMode_ = _blendMode;
}
void Emitter::copyPosToHostAndPrint() {
float *h_pos;
h_pos = new float[params_.numParticles_*3];
hipMemcpy(h_pos, d_pos_, sizeof(float)*3*params_.numParticles_,
hipMemcpyDeviceToHost);
for (int i=0; i<params_.numParticles_; ++i) {
std::cout << "(" << h_pos[3*i] << ", " << h_pos[3*i+1] << ", " << h_pos[3*i+2] << ")" << std::endl;
}
delete h_pos;
}
| 1d0eb6b5f66650eb34508cca5c7f2918cb8e612d.cu | #include "linux_helper.h"
#include "Emitter.cuh"
#include "../Graphics.h"
#include "../Engine.h"
#include "../Camera.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <device_launch_parameters.h>
#include <iostream>
enum SHADER_ATTRIBUTES_IDX {
POSITION = 0,
SIZE = 1,
TIME = 2,
NUM_SHADER_ATTRIBUTES = 3
};
static std::string ShaderAttributes[NUM_SHADER_ATTRIBUTES] = {
"positionIn",
"sizeIn",
"timeIn",
};
__global__ void initRand(curandState *_randstate) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// From CURAND library guide
// Each thread gets same seed, a different sequence number
// and no offset.
curand_init(2345, tid, 0, &_randstate[tid]);
}
__global__ void kill(Emitter::EmitterParams _p, float *_time) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < _p.numParticles_) {
_time[tid] = 0.0f;
tid += blockDim.x * gridDim.x;
}
}
__global__ void init(Emitter::EmitterParams _p,
float *_time,
float *_pos,
float *_acc,
float *_vel,
float *_size) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < _p.numParticles_) {
_time[tid] = 1.0f;
_pos[3*tid+0] = _p.startPos_[0];
_pos[3*tid+1] = _p.startPos_[1];
_pos[3*tid+2] = _p.startPos_[2];
_acc[3*tid+0] = _p.startAcc_[0];
_acc[3*tid+1] = _p.startAcc_[1];
_acc[3*tid+2] = _p.startAcc_[2];
_vel[3*tid+0] = _p.startVel_[0];
_vel[3*tid+1] = _p.startVel_[1];
_vel[3*tid+2] = _p.startVel_[2];
_size[tid] = _p.pointSize_;
tid += blockDim.x * gridDim.x;
}
}
__global__ void newParticle(Emitter::EmitterParams _p,
float *_time,
float *_pos,
float *_acc,
float *_vel,
float *_size,
unsigned int _index,
curandState *_randstate) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int index;
int limit;
switch (_p.emitterType_) {
case Emitter::EMITTER_STREAM:
index = _index; // only add one new particle
limit = _p.numParticles_;
break;
case Emitter::EMITTER_BURST:
index = tid; // add several particles
if (_p.burstSize_ <= _p.numParticles_) limit = _p.burstSize_;
else limit = _p.numParticles_;
break;
}
while (index < limit) {
// draw normally distributed random offsets: three for velocity, three for position, one for time
float vx_offset = 2.f * ( curand_normal(&_randstate[tid]) - 0.5f );
float vy_offset = 2.f * ( curand_normal(&_randstate[tid]) - 0.5f );
float vz_offset = 2.f * ( curand_normal(&_randstate[tid]) - 0.5f );
float px_offset = 2.f * ( curand_normal(&_randstate[tid]) - 0.5f );
float py_offset = 2.f * ( curand_normal(&_randstate[tid]) - 0.5f );
float pz_offset = 2.f * ( curand_normal(&_randstate[tid]) - 0.5f );
float t_offset = curand_normal(&_randstate[tid]);
_time[index] = 1.0f + t_offset*0.01;
_pos[3*index+0] = _p.startPos_[0] + px_offset * _p.posRandWeight_;
_pos[3*index+1] = _p.startPos_[1] + py_offset * _p.posRandWeight_;
_pos[3*index+2] = _p.startPos_[2] + pz_offset * _p.posRandWeight_;
_acc[3*index+0] = _p.startAcc_[0];
_acc[3*index+1] = _p.startAcc_[1];
_acc[3*index+2] = _p.startAcc_[2];
_vel[3*index+0] = _p.startVel_[0] + vx_offset * _p.velRandWeight_;
_vel[3*index+1] = _p.startVel_[1] + vy_offset * _p.velRandWeight_;
_vel[3*index+2] = _p.startVel_[2] + vz_offset * _p.velRandWeight_;
_size[index] = _p.pointSize_;
// only run once if stream (only add one at a time)
if (_p.emitterType_ == Emitter::EMITTER_STREAM) break;
index += blockDim.x * gridDim.x;
}
}
__global__ void integrate(Emitter::EmitterParams _p,
float *_time,
float *_pos,
float *_acc,
float *_vel,
float *_size,
float _dt) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < _p.numParticles_) {
if (_time[tid] > 0.0) {
// subtract elapsed time
_time[tid] -= (1.f/_p.lifeTime_)*_dt;
_vel[3*tid+0] += _dt * _acc[3*tid+0];
_vel[3*tid+1] += _dt * _acc[3*tid+1];
_vel[3*tid+2] += _dt * _acc[3*tid+2];
_pos[3*tid+0] += _dt * _vel[3*tid+0];
_pos[3*tid+1] += _dt * _vel[3*tid+1];
_pos[3*tid+2] += _dt * _vel[3*tid+2];
_size[tid] *= _p.growthFactor_;
}
tid += blockDim.x * gridDim.x;
}
}
Emitter::Emitter(unsigned int _numParticles, ShaderData*_sd) : shaderData_(_sd)
{
// set standard values
params_.numParticles_ = _numParticles;
params_.burstSize_ = _numParticles;
params_.emitterType_ = Emitter::EMITTER_STREAM;
params_.growthFactor_ = 1.f;
params_.lifeTime_ = 100.f;
params_.mass_ = 1.f;
params_.pointSize_ = 30.f;
params_.posRandWeight_ = 0.f;
params_.rate_ = 0.001f;
params_.startAcc_[0] = 0.f;
params_.startAcc_[1] = 0.f;
params_.startAcc_[2] = 0.f;
params_.startPos_[0] = 0.f;
params_.startPos_[1] = 0.f;
params_.startPos_[2] = 0.f;
params_.startVel_[0] = 0.f;
params_.startVel_[1] = 1.f;
params_.startVel_[2] = 0.f;
params_.velRandWeight_ = 0.f;
params_.blendMode_ = Emitter::BLEND_FIRE;
blocks_ = threads_ = 128;
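// The launch configuration is fixed at 128 blocks of 128 threads; this is safe for any
// particle count because the kernels above iterate with a grid-stride loop
// (tid += blockDim.x * gridDim.x) until all numParticles_ elements are covered.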
// allocate device memory
cudaMalloc((void**)&d_time_, sizeof(float)*_numParticles);
cudaMalloc((void**)&d_pos_, sizeof(float)*3*_numParticles);
cudaMalloc((void**)&d_acc_, sizeof(float)*3*_numParticles);
cudaMalloc((void**)&d_vel_, sizeof(float)*3*_numParticles);
cudaMalloc((void**)&d_size_, sizeof(float)*_numParticles);
// for random states
cudaMalloc((void**)&d_randstate_, sizeof(curandState)*blocks_*threads_);
// init
init CUDA_KERNEL_DIM(blocks_,threads_)(params_,
d_time_,
d_pos_,
d_acc_,
d_vel_,
d_size_);
initRand CUDA_KERNEL_DIM(blocks_, threads_) (d_randstate_);
// first particle goes in the first slot
nextSlot_ = 0;
// reset time
nextEmission_ = params_.rate_;
// generate VBOs
std::string name("nat javla namn, todo");
Graphics::instance().buffersNew(name, VAO_, vboPos_, vboSize_, vboTime_);
Graphics::instance().geometryIs(
vboPos_,
vboSize_,
vboTime_,
_numParticles,
VBO_DYNAMIC);
const int id = shaderData_->shaderID();
Graphics & g = Graphics::instance();
int posLoc = g.shaderAttribLoc(id , ShaderAttributes[POSITION]);
int sizeLoc = g.shaderAttribLoc(id , ShaderAttributes[SIZE]);
int timeLoc = g.shaderAttribLoc(id , ShaderAttributes[TIME]);
unsigned int sID = shaderData_->shaderID();
g.bindGeometry(sID, VAO_, vboPos_, 3, 0, posLoc, 0);
g.bindGeometry(sID, VAO_, vboSize_, 1, 0, sizeLoc, 0);
g.bindGeometry(sID, VAO_, vboTime_, 1, 0, timeLoc, 0);
cudaGLRegisterBufferObject(vboPos_);
cudaGLRegisterBufferObject(vboSize_);
cudaGLRegisterBufferObject(vboTime_);
}
Emitter::~Emitter() {
std::cout << "~Emitter()" << std::endl;
cudaGLUnmapBufferObject(vboPos_);
cudaGLUnmapBufferObject(vboSize_);
cudaGLUnmapBufferObject(vboTime_);
}
void Emitter::display() const
{
Matrix4 * modelView = shaderData_->stdMatrix4Data(MODELVIEW);
*modelView = Engine::instance().camera()->viewMtx();
bool additive = params_.blendMode_ == BLEND_FIRE ? true : false;
Graphics::instance().drawArrays(VAO_, params_.numParticles_,
shaderData_, additive);
}
void Emitter::burst() {
if (params_.emitterType_ != Emitter::EMITTER_BURST) return;
//std::cout << "bursting" << std::endl;
cudaGLMapBufferObject((void**)&d_pos_, vboPos_);
cudaGLMapBufferObject((void**)&d_size_, vboSize_);
cudaGLMapBufferObject((void**)&d_time_, vboTime_);
newParticle CUDA_KERNEL_DIM(blocks_,threads_)(params_,
d_time_,
d_pos_,
d_acc_,
d_vel_,
d_size_,
0,
d_randstate_);
cudaGLUnmapBufferObject(vboPos_);
cudaGLUnmapBufferObject(vboSize_);
cudaGLUnmapBufferObject(vboTime_);
}
void Emitter::update(float _dt) {
cudaGLMapBufferObject((void**)&d_pos_, vboPos_);
cudaGLMapBufferObject((void**)&d_size_, vboSize_);
cudaGLMapBufferObject((void**)&d_time_, vboTime_);
// only care about new emissions if it's a stream
if (params_.emitterType_ == Emitter::EMITTER_STREAM) {
// count off elapsed time
nextEmission_ -= _dt;
// std::cout << "Next emission: " << nextEmission_ << std::endl;
//std::cout << "Nect slot: " << nextSlot_ << std::endl;
if (nextEmission_ < 0.0) {
// calculate how many particles we should emit
int numNewParticles = (int)(-nextEmission_/params_.rate_);
// reset time for next emission
nextEmission_ += numNewParticles*params_.rate_;
nextEmission_ += params_.rate_;
// emit new particles to make up for any overlap in elapsed time
do {
// emit a particle
newParticle CUDA_KERNEL_DIM(1,1) (params_,
d_time_,
d_pos_,
d_acc_,
d_vel_,
d_size_,
nextSlot_,
d_randstate_);
// jump forward one slot
nextSlot_++;
if (nextSlot_ == params_.numParticles_) nextSlot_ = 0;
numNewParticles--;
} while (numNewParticles > 0);
} // if nextemission
} // if stream
// update all the particles
integrate CUDA_KERNEL_DIM(blocks_,threads_) (params_,
d_time_,
d_pos_,
d_acc_,
d_vel_,
d_size_,
_dt);
cudaGLUnmapBufferObject(vboPos_);
cudaGLUnmapBufferObject(vboSize_);
cudaGLUnmapBufferObject(vboTime_);
//copyPosToHostAndPrint();
}
void Emitter::deactivate() {
cudaGLMapBufferObject((void**)&d_pos_, vboPos_);
cudaGLMapBufferObject((void**)&d_size_, vboSize_);
cudaGLMapBufferObject((void**)&d_time_, vboTime_);
kill CUDA_KERNEL_DIM(blocks_,threads_)(params_, d_time_);
cudaGLUnmapBufferObject(vboPos_);
cudaGLUnmapBufferObject(vboSize_);
cudaGLUnmapBufferObject(vboTime_);
}
void Emitter::posIs(Vector3 _pos) {
params_.startPos_[0] = _pos.x;
params_.startPos_[1] = _pos.y;
params_.startPos_[2] = _pos.z;
}
void Emitter::accIs(Vector3 _acc) {
params_.startAcc_[0] = _acc.x;
params_.startAcc_[1] = _acc.y;
params_.startAcc_[2] = _acc.z;
}
void Emitter::velIs(Vector3 _vel) {
params_.startVel_[0] = _vel.x;
params_.startVel_[1] = _vel.y;
params_.startVel_[2] = _vel.z;
}
void Emitter::rateIs(float _rate) {
params_.rate_ = _rate;
}
void Emitter::massIs(float _mass) {
params_.mass_ = _mass;
}
void Emitter::burstSizeIs(unsigned int _burstSize) {
params_.burstSize_ = _burstSize;
}
void Emitter::lifeTimeIs(float _lifeTime) {
params_.lifeTime_ = _lifeTime;
}
void Emitter::typeIs(Type _emitterType) {
params_.emitterType_ = _emitterType;
}
void Emitter::pointSizeIs(float _size) {
params_.pointSize_ = _size;
}
void Emitter::growthFactorIs(float _growthFactor) {
params_.growthFactor_ = _growthFactor;
}
void Emitter::velRandWeightIs(float _velRandWeight) {
params_.velRandWeight_ = _velRandWeight;
}
void Emitter::posRandWeightIs(float _posRandWeight) {
params_.posRandWeight_ = _posRandWeight;
}
void Emitter::shaderDataIs(ShaderData * _shaderData) {
shaderData_ = _shaderData;
}
void Emitter::blendModeIs(BlendMode _blendMode) {
params_.blendMode_ = _blendMode;
}
void Emitter::copyPosToHostAndPrint() {
float *h_pos;
h_pos = new float[params_.numParticles_*3];
cudaMemcpy(h_pos, d_pos_, sizeof(float)*3*params_.numParticles_,
cudaMemcpyDeviceToHost);
for (int i=0; i<params_.numParticles_; ++i) {
std::cout << "(" << h_pos[3*i] << ", " << h_pos[3*i+1] << ", " << h_pos[3*i+2] << ")" << std::endl;
}
delete h_pos;
}
|
eacccb3885e9c14fca30f9803153514f0649d335.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorTopK.cu"
#else
THC_API void THCTensor_(topk)(THCState* state,
THCTensor *topK,
THCudaLongTensor *indices,
THCTensor *input,
int64_t k, int dim, int dir, int sorted) {
THAssert(topK != NULL && indices != NULL && input != NULL);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, topK, indices, input));
THArgCheck(THCTensor_(nDimension)(state, topK) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
int64_t dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
int numDims = THCTensor_(nDimension)(state, input);
THArgCheck(numDims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
THArgCheck(dim >= 0 && dim < numDims, 6, "dim not in range");
int64_t sliceSize = THCTensor_(size)(state, input, dim);
THArgCheck(k > 0 && k <= sliceSize, 5, "k not in range for dimension");
// Build the output size, which is the dim being selected set to
// size k
THLongStorage* topKSize = THCTensor_(newSizeOf)(state, input);
THLongStorage_set(topKSize, dim, k);
THCTensor_(resize)(state, topK, topKSize, NULL);
THCudaLongTensor_resize(state, indices, topKSize, NULL);
THLongStorage_free(topKSize);
#define RUN_K(INDEX_T, DIM, DIR) \
hipLaunchKernelGGL(( gatherTopK<real, INDEX_T, DIM, DIR>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
inputInfo, \
sliceSize, \
k, \
inputSlices, \
/* The actual dimension that the k-selection is running in */ \
/* may have changed from collapseDims() */ \
inputInfo.strides[collapseInputDim], \
topKInfo, \
topKSlices, \
topKInfo.strides[collapseTopKDim], \
indicesInfo, \
indicesInfo.strides[collapseIndicesDim])
#define RUN_DIR(INDEX_T, DIM) \
if (dir) { \
RUN_K(INDEX_T, DIM, true); \
} else { \
RUN_K(INDEX_T, DIM, false); \
}
#define RUN_DIM(INDEX_T) \
if (allDims == 1) { \
RUN_DIR(INDEX_T, 1); \
} else if (allDims == 2) { \
RUN_DIR(INDEX_T, 2); \
} else if (allDims == 3) { \
RUN_DIR(INDEX_T, 3); \
} else { \
RUN_DIR(INDEX_T, -1); \
}
#define RUN_T(INDEX_T) \
TensorInfo<real, INDEX_T> inputInfo = \
getTensorInfo<THCTensor, INDEX_T>(state, input); \
TensorInfo<real, INDEX_T> topKInfo = \
getTensorInfo<THCTensor, INDEX_T>(state, topK); \
TensorInfo<int64_t, INDEX_T> indicesInfo = \
getTensorInfo<THCudaLongTensor, INDEX_T>(state, indices); \
\
/* We use these structures solely to find the offset to */ \
/* each slice we are operating on */ \
inputInfo.sizes[dim] = 1; \
topKInfo.sizes[dim] = 1; \
indicesInfo.sizes[dim] = 1; \
\
/* Collapse all other dims */ \
int collapseInputDim = inputInfo.collapseDims(dim); \
int collapseTopKDim = topKInfo.collapseDims(dim); \
int collapseIndicesDim = indicesInfo.collapseDims(dim); \
\
int64_t inputSlices = 1; \
for (int i = 0; i < inputInfo.dims; ++i) { \
inputSlices *= inputInfo.sizes[i]; \
} \
int64_t topKSlices = 1; \
for (int i = 0; i < topKInfo.dims; ++i) { \
topKSlices *= topKInfo.sizes[i]; \
} \
\
dim3 grid; \
if (!THC_getGridFromTiles(inputSlices, grid)) { \
THError("Slice to sort is too large"); \
} \
\
dim3 block(::min(THCRoundUp(sliceSize, (int64_t) 32), (int64_t) 1024)); \
\
/* This is used as a template parameter to calculate indices. */ \
/* We only specialize it if all collapsed dim sizes are the */ \
/* same; otherwise, we use -1 which is the specialization */ \
/* parameter for arbitrary dimensions */ \
int allDims = inputInfo.dims; \
if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \
allDims = -1; \
} \
\
RUN_DIM(INDEX_T);
// Based on required index size, run the algorithm with the
// appropriate index type
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, input) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, topK) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
RUN_T(uint32_t);
} else {
RUN_T(uint64_t);
}
#undef RUN_T
#undef RUN_DIM
#undef RUN_DIR
#undef RUN_K
// Sort the results if the user wants them sorted, since our
// selection routine does not ensure sorting
if (sorted) {
// FIXME: the k/v inplace sort along slice only works for size <=
// 2048 at the moment
if (sliceSize <= 2048) {
// This avoids any memory allocations and performs all sorting
// work inplace along the slice
THCTensor_(sortKeyValueInplace)(state, topK, indices, dim, dir);
} else {
// Depend upon the backup sort that returns indices, which we
// can use in conjunction with gather to produce the original
// indices.
// This is not the most efficient implementation, especially since
// there are memory allocations performed here. If the user desires
// greater performance, they should torch.gather() the results
// themselves using the reported indices, providing previously
// allocated tensors to receive the results.
THCTensor* sortedTopK = THCTensor_(new)(state);
THCudaLongTensor* sortedIndices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sortedTopK, sortedIndices, topK, dim, dir);
THCudaLongTensor* sortedTopKIndices = THCudaLongTensor_new(state);
THCudaLongTensor_resizeAs(state, sortedTopKIndices, indices);
THCudaLongTensor_gather(state, sortedTopKIndices, indices, dim, sortedIndices);
THCTensor_(freeCopyTo)(state, sortedTopK, topK);
THCudaLongTensor_freeCopyTo(state, sortedTopKIndices, indices);
THCudaLongTensor_free(state, sortedIndices);
}
}
THCudaCheck(hipGetLastError());
}
#endif // THC_GENERIC_FILE
| eacccb3885e9c14fca30f9803153514f0649d335.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorTopK.cu"
#else
THC_API void THCTensor_(topk)(THCState* state,
THCTensor *topK,
THCudaLongTensor *indices,
THCTensor *input,
int64_t k, int dim, int dir, int sorted) {
THAssert(topK != NULL && indices != NULL && input != NULL);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, topK, indices, input));
THArgCheck(THCTensor_(nDimension)(state, topK) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
int64_t dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
int numDims = THCTensor_(nDimension)(state, input);
THArgCheck(numDims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
THArgCheck(dim >= 0 && dim < numDims, 6, "dim not in range");
int64_t sliceSize = THCTensor_(size)(state, input, dim);
THArgCheck(k > 0 && k <= sliceSize, 5, "k not in range for dimension");
// Build the output size, which is the dim being selected set to
// size k
THLongStorage* topKSize = THCTensor_(newSizeOf)(state, input);
THLongStorage_set(topKSize, dim, k);
THCTensor_(resize)(state, topK, topKSize, NULL);
THCudaLongTensor_resize(state, indices, topKSize, NULL);
THLongStorage_free(topKSize);
#define RUN_K(INDEX_T, DIM, DIR) \
gatherTopK<real, INDEX_T, DIM, DIR> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
inputInfo, \
sliceSize, \
k, \
inputSlices, \
/* The actual dimension that the k-selection is running in */ \
/* may have changed from collapseDims() */ \
inputInfo.strides[collapseInputDim], \
topKInfo, \
topKSlices, \
topKInfo.strides[collapseTopKDim], \
indicesInfo, \
indicesInfo.strides[collapseIndicesDim])
#define RUN_DIR(INDEX_T, DIM) \
if (dir) { \
RUN_K(INDEX_T, DIM, true); \
} else { \
RUN_K(INDEX_T, DIM, false); \
}
#define RUN_DIM(INDEX_T) \
if (allDims == 1) { \
RUN_DIR(INDEX_T, 1); \
} else if (allDims == 2) { \
RUN_DIR(INDEX_T, 2); \
} else if (allDims == 3) { \
RUN_DIR(INDEX_T, 3); \
} else { \
RUN_DIR(INDEX_T, -1); \
}
#define RUN_T(INDEX_T) \
TensorInfo<real, INDEX_T> inputInfo = \
getTensorInfo<THCTensor, INDEX_T>(state, input); \
TensorInfo<real, INDEX_T> topKInfo = \
getTensorInfo<THCTensor, INDEX_T>(state, topK); \
TensorInfo<int64_t, INDEX_T> indicesInfo = \
getTensorInfo<THCudaLongTensor, INDEX_T>(state, indices); \
\
/* We use these structures solely to find the offset to */ \
/* each slice we are operating on */ \
inputInfo.sizes[dim] = 1; \
topKInfo.sizes[dim] = 1; \
indicesInfo.sizes[dim] = 1; \
\
/* Collapse all other dims */ \
int collapseInputDim = inputInfo.collapseDims(dim); \
int collapseTopKDim = topKInfo.collapseDims(dim); \
int collapseIndicesDim = indicesInfo.collapseDims(dim); \
\
int64_t inputSlices = 1; \
for (int i = 0; i < inputInfo.dims; ++i) { \
inputSlices *= inputInfo.sizes[i]; \
} \
int64_t topKSlices = 1; \
for (int i = 0; i < topKInfo.dims; ++i) { \
topKSlices *= topKInfo.sizes[i]; \
} \
\
dim3 grid; \
if (!THC_getGridFromTiles(inputSlices, grid)) { \
THError("Slice to sort is too large"); \
} \
\
dim3 block(std::min(THCRoundUp(sliceSize, (int64_t) 32), (int64_t) 1024)); \
\
/* This is used as a template parameter to calculate indices. */ \
/* We only specialize it if all collapsed dim sizes are the */ \
/* same; otherwise, we use -1 which is the specialization */ \
/* parameter for arbitrary dimensions */ \
int allDims = inputInfo.dims; \
if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \
allDims = -1; \
} \
\
RUN_DIM(INDEX_T);
// Based on required index size, run the algorithm with the
// appropriate index type
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, input) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, topK) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
RUN_T(uint32_t);
} else {
RUN_T(uint64_t);
}
#undef RUN_T
#undef RUN_DIM
#undef RUN_DIR
#undef RUN_K
// Sort the results if the user wants them sorted, since our
// selection routine does not ensure sorting
if (sorted) {
// FIXME: the k/v inplace sort along slice only works for size <=
// 2048 at the moment
if (sliceSize <= 2048) {
// This avoids any memory allocations and performs all sorting
// work inplace along the slice
THCTensor_(sortKeyValueInplace)(state, topK, indices, dim, dir);
} else {
// Depend upon the backup sort that returns indices, which we
// can use in conjunction with gather to produce the original
// indices.
// This is not the most efficient implementation, especially since
// there are memory allocations performed here. If the user desires
// greater performance, they should torch.gather() the results
// themselves using the reported indices, providing previously
// allocated tensors to receive the results.
THCTensor* sortedTopK = THCTensor_(new)(state);
THCudaLongTensor* sortedIndices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sortedTopK, sortedIndices, topK, dim, dir);
THCudaLongTensor* sortedTopKIndices = THCudaLongTensor_new(state);
THCudaLongTensor_resizeAs(state, sortedTopKIndices, indices);
THCudaLongTensor_gather(state, sortedTopKIndices, indices, dim, sortedIndices);
THCTensor_(freeCopyTo)(state, sortedTopK, topK);
THCudaLongTensor_freeCopyTo(state, sortedTopKIndices, indices);
THCudaLongTensor_free(state, sortedIndices);
}
}
THCudaCheck(cudaGetLastError());
}
#endif // THC_GENERIC_FILE
|
4dc673cb7f830dae597efe376195643361ab6e91.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __HIPCC__
#define __HIPCC__
#endif
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include "hip/device_functions.h"
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <time.h>
#include <windows.h>
#include <iostream>
//-------------------------------------------------------CPU TIMER LIBRARY-------------------------------------------------------
#if defined(_MSC_VER) || defined(_MSC_EXTENSIONS)
#define DELTA_EPOCH_IN_MICROSECS 116444736000000000Ui64 // CORRECT
#else
#define DELTA_EPOCH_IN_MICROSECS 116444736000000000ULL // CORRECT
#endif
struct timezone
{
int tz_minuteswest; /* minutes W of Greenwich */
int tz_dsttime; /* type of dst correction */
};
// Definition of a gettimeofday function
int gettimeofday(struct timeval* tv, struct timezone* tz)
{
// Define a structure to receive the current Windows filetime
FILETIME ft;
// Initialize the present time to 0 and the timezone to UTC
unsigned __int64 tmpres = 0;
static int tzflag = 0;
if (NULL != tv)
{
GetSystemTimeAsFileTime(&ft);
// The GetSystemTimeAsFileTime returns the number of 100 nanosecond
// intervals since Jan 1, 1601 in a structure. Copy the high bits to
// the 64 bit tmpres, shift it left by 32 then or in the low 32 bits.
tmpres |= ft.dwHighDateTime;
tmpres <<= 32;
tmpres |= ft.dwLowDateTime;
// Convert to microseconds by dividing by 10
tmpres /= 10;
// The Unix epoch starts on Jan 1 1970. Need to subtract the difference
// in seconds from Jan 1 1601.
tmpres -= DELTA_EPOCH_IN_MICROSECS;
// Finally change microseconds to seconds and place in the seconds value.
// The modulus picks up the microseconds.
tv->tv_sec = (long)(tmpres / 1000000UL);
tv->tv_usec = (long)(tmpres % 1000000UL);
}
if (NULL != tz)
{
if (!tzflag)
{
_tzset();
tzflag++;
}
// Adjust for the timezone west of Greenwich
tz->tz_minuteswest = _timezone / 60;
tz->tz_dsttime = _daylight;
}
return 0;
}
//--------------------------------------------------------GPU TIMER LIBRARY--------------------------------------------------------------------
struct GpuTimer
{
hipEvent_t start;
hipEvent_t stop;
GpuTimer()
{
hipEventCreate(&start);
hipEventCreate(&stop);
}
~GpuTimer()
{
hipEventDestroy(start);
hipEventDestroy(stop);
}
void Start()
{
hipEventRecord(start, 0);
}
void Stop()
{
hipEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
//-----------------------------------------------------------------------------------------------------------------------------
using namespace std;
//Device function which bubble sorts a specific section of arr[]. Section [start:end]. Both inclusive.
__device__ void bubblesort(int* arr, int start, int end)
{
int n = end - start + 1; //Length of array section from start to end
int i, k, flag, temp;
for (k = 1; k < (n - 1) + 1; k++)
{
flag = 0;
for (i = start; i < (end - k + 1); i++)
{
if (arr[i] > arr[i + 1])
{
temp = arr[i]; //
arr[i] = arr[i + 1]; // Swapping A[i+1] and A[i]
arr[i + 1] = temp; //
}
}
}
}
//Entire block for the entire array. Each thread takes care of bubble-sorting an individual section of size : *section_length
__global__ void section_sort(int* nums, int section_size) //(, int n)
{
//The thread with thread index = idx will take care of nums[] from : [( section_size * idx ) to ( section_size * (idx + 1) - 1 )]
//For example: idx = 1 and section_size = 20, then, thread with idx = 1 will take care of nums[ 20: 39 ]
int idx = threadIdx.x;
//Bubble sort nums[] from index [ ( section_size * idx ) : ( section_size * (idx + 1) - 1 ) ]
bubblesort(nums, section_size * idx, (section_size * (idx + 1) - 1));
}
//----------------------------------------------------------------------------------------------------------------------------
//Device Function: Takes a number target and searches the array arr[] ( an array of size n), and returns the index such that : nums[index] <= target
__device__ int bin_search(int* arr, int target, int n) // n = size of arr2
{
//Corner Cases : When the target is out of boundary of the range of values in the array
if (target < arr[0]) return -1;
if (target > arr[n - 1]) return n - 1;
//f, l, mid
int left = 0;
int right = n;
int mid = (left + right) / 2;
while (left <= right)
{
int mid = (left + right) / 2; //Calculate mid
if (arr[mid] == target)
{
return mid - 1; //Return index where nums[] == target
}
//All elements to right of mid are greater than target
else if (arr[mid] > target)
{
//If nums[mid-1] < target < nums[mid] ( Meaning target lies between nums[mid-1] and nums[mid] ==> (mid-1) is the required index)
if (arr[mid - 1] < target)
{
return (mid - 1);
}
else //Change the right border
{
right = mid - 1;
}
}
//All elements to left of mid are lesser than target
else if (arr[mid] < target)
{
//If nums[mid] < target < nums[mid+1] ( Meaning target lies between nums[mid] and nums[mid+1] )
if ((arr[mid + 1] > target))
{
return (mid);
}
else //Change the left border
{
left = mid + 1;
}
}
}
return -1;
}
//Merges 2 sorted array, by using a GPU kernel call to parallely produce scatter addresses:
// Each Thread of block will parallely produce scatter addresses for its element. Block is divided into 2 sections.
// Scatter address for All elements of both sections are parallelly produced.
// THe block is responsible for merging both of its sections
// Finally, the block in array is sorted according to the scatter addresses
// *arr = array pointer
// *section_size = The length of the both 2 subarrays into which arr[] is split
// *d_out_temp = Where array output is stored
__global__ void merge(int* arr, int section_length, int* d_out_temp)
{
//int section_length = *section_size;
int superset_length = section_length * 2; //Block will be 2 * (size of 1 section). Because 2 sections are merged
int idx = threadIdx.x;
int b_idx = blockIdx.x;
//Length of arr1[] and arr2[] are section size
int len1 = section_length;
int len2 = section_length;
//-----Select *arr1 and *arr2 and *d_out_curr------------------------
int* arr1 = arr + (b_idx * superset_length);
int* arr2 = arr1 + (section_length);
int* d_out_curr = d_out_temp + (b_idx * superset_length); //Determine d_out_curr[], the output array for current merge
//Dynamically allocated shared memory array.
// scat_ad[] from index [0 to n1-1] is for arr1[].
//scat_ad[] from index [n1 to n2-1] is for arr2[]
//Create a shared memory of size n1+n2 to accomodate the scatter-addresses corresonding to each element in arr1[] and arr2[]
extern __shared__ int scat_ad[];
//--------------------------------These threads are responsible for arr1[]-------------------------------------------------------
if (idx <= len1 - 1)
{
int idx1 = idx; //Number of elements in arr1[] that are lesser than arr1[idx]. idx1 = index of current element in arr1[]
int target = arr1[idx1]; //Target is current element in arr1[]
//--------------Find idx2----------------------------------------Binary Search Part------------------------------
int idx2 = bin_search(arr2, target, len2) + 1; //Number of elements in arr2[] that are lesser than arr1[idx].....
//Calculate and store the scatter address in array
//scat_arr1[idx] = idx1 + idx2; //If there are 2 elements before a number in output array, its index will be 2
scat_ad[idx] = idx1 + idx2; //Scatter address correspinding to arr1[idx] = idx1 + idx2
}
//--------------------------------------These threads are responsible for arr2[]--------------------------------------------
else if (idx >= len1)
{
//Number of elements in arr2[] that are lesser than arr2[idx].
//idx1 = index of current element in arr2[]
//(idx-len1) because threads with index n1 to n2-1 are responsible for arr2[] index [0: n2-1]
int idx1 = idx - len1;
int target = arr2[idx1]; //Target is current element in arr1[]
//--------------Find idx2-----------------------------Binary Search Part---------------------------
int idx2 = bin_search(arr1, target, len1) + 1; //Number of elements in arr1[] that are lesser than arr2[idx]. +1 bcos we want appropriate position for current element
//Calculate and store the scatter address in array
//scat_arr1[idx] = idx1 + idx2; //If there are 2 elements before a number in output array, its index will be 2
scat_ad[idx] = idx1 + idx2; //Scatter address corresponding to arr2[idx - len1] = idx1 + idx2
}
__syncthreads(); //Barrier to ensure that all threads have finished writing scat_ad[].------------------Not necessary
//-------------Store the output in respective position in d_out_temp[] using scatter address so that they are in sorted order-----------------------------------
/*
if (idx < len1)
{
d_out_curr[scat_ad[idx]] = arr1[idx];
}
else if (idx >= len1)
{
//d_out_curr[scat_ad[idx]] = arr2[idx - len1];
d_out_curr[scat_ad[idx]] = arr1[idx];
}
*/
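// arr1[idx] is valid for every thread in the block: arr2 points at arr1 + section_length,
// so for idx >= len1 the expression arr1[idx] reads the same element as arr2[idx - len1].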
d_out_curr[scat_ad[idx]] = arr1[idx];
__syncthreads();
//--------------------------------------Copy sorted elements back to array-----------------------------------------------------
arr1[idx] = d_out_curr[idx];
//printf( "%d ", arr1[idx] );
}
//Makes kernel call to merge 2 sorted array:
//
void merge_sort()
{
GpuTimer timer;
//4 sections of 5 elements size
//int h_arr[] = { 120,119,118,117,116, 115,114,113,112,111, 110,109,108,107,106, 105,104,103,102,101 };
//int h_arr[] = { 596, 703, 277, 228, 548, 515, 213, 880, 391, 364, 224, 623, 845, 152, 454, 987, 854, 257, 402, 990, 996, 819, 756, 735, 460, 87, 693, 268, 92, 14, 860, 68, 996, 934, 478, 855, 209, 293, 171, 285 };
int h_arr[40] = { 100,99,98,97,96,95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,63,62,61 };
//int h_arr[80] = { 100,99,98,97,96,95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,31,30,29,28,27,26,25,24,23,22,21 };
//int h_arr[64] = { 100,99,98,97,96,95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38,37 };
int n = sizeof(h_arr) / sizeof(int); //n = Total size of host array
int div_num = 4; //How many parts the array is initially split.
int section_size = n/div_num; // section_size = Size of each section after splitting arr[] into div_num parts (Stored in Host)
//-----------------------------------Create input and output arrays in GPU---------------------------------------------
int* d_arr, * d_out_temp; // *d_out2;
hipMalloc((void**)&d_arr, n * sizeof(int));
hipMemcpy((void*)d_arr, (void*)h_arr, n * sizeof(int), hipMemcpyHostToDevice); //d_arr[] is input array in device
hipMalloc((void**)&d_out_temp, n * sizeof(int)); //d_out_temp[] is temporarily used to store sorted block elements
timer.Start();
//------------------------------Stage-1: KERNEL CALL: Bubble Sort Each Section of section_size elements------------------------------------
hipLaunchKernelGGL(( section_sort) , dim3(1), dim3(div_num), 0, 0, d_arr, section_size); //Call div_num threads: Each thread bubble-sorts a sub-section of n/div_num elements in the array.
/*
//---------------Stage-2 : KERNEL CALL: Perform 2 Parallel Merges on 2 Groups of 2 Sections (Each Section Of Size n/4)-----------------------------------------------
//Make kernel call to 2 blocks of n/2 threads each. Each thread is responsible for 1 element of its block. 3rd parameter n/2 is for shared memory size
//Imagine The entire arr[] is divided into 2 blocks of n/2 size each. Each block is divided into sections of section_size
div_num = div_num / 2; //Initially : Number of supersets will be Half of Total Number of Divisons (Here: 8/2 = 4)
merge <<< div_num, n/div_num, n/div_num >>> ( d_arr, section_size, d_out_temp);
//Number of Threads = Size of Superset (Group of 2 sections)
//NUmber of blocks = Number of supersets
//---------------------------Stage-3 : KERNEL CALL: Perform 1 Merge On 2 Sections (Each of Size n/2)-------------------------------------------------------------------
//Make kernel call to 1 blocks of n threads . Each thread is responsible for 1 element of its block. 3rd parameter n is for shared memory size
//Entire arr[] is 1 block.
div_num = div_num / 2; //Number of supersets will be halved (Here : 4/2= 2)
section_size = section_size * 2; //Size of each section will double
merge <<< div_num, n/div_num, n/div_num >>> ( d_arr, section_size, d_out_temp); //Call kernel with INPUT: d_out_temp, and Output = d_out2[].... Section Length = 10
div_num = div_num / 2; //Number of supersets will be halved (Here : 2/2 = 1)
section_size = section_size * 2; //Size of each section will double
merge <<< div_num, n/div_num, n/div_num >>> (d_arr, section_size, d_out_temp); //Call kernel with INPUT: d_out_temp, and Output = d_out2[].... Section Length = 10
*/
//-----------------------------VERY IMPORTANT NOTE------------------------
//NOTE: SUPERSET = GROUP OF 2 SECTIONS. WHEN WE MERGE A SUPERSET, WE MERGE THE 2 SECTIONS OF THE SUPERSET TO PRODUCE A SORTED SUPERSET
//Initially, section_size = n / div_num
	int superset_num = div_num / 2; //The total number of supersets in the array, each of which is merged by a separate block. Initially, number of supersets = half of total number of divisions/sections in array
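	//Illustrative trace (added commentary, not part of the original code): with the 40-element h_arr above and div_num = 4,
	//section_size starts at 10 and superset_num starts at 2, so the loop below runs as:
	//  iteration 1: 2 blocks x 20 threads merge two pairs of 10-element sections -> two sorted 20-element supersets
	//  iteration 2: 1 block  x 40 threads merge the remaining two 20-element sections -> one sorted 40-element array
	//After the final iteration superset_num becomes 0 and the loop exits.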
while (superset_num >= 1)
{
		//---------------Merge Stage : KERNEL CALL: merge the two sorted sections inside each superset, all supersets in parallel-----------------------------------------------
		//Make kernel call to superset_num blocks of n/superset_num threads each. Each thread is responsible for 1 element of its superset.
		//3rd launch parameter is the dynamic shared memory size (in bytes) for one superset's scatter addresses.
		//Imagine the entire arr[] divided into superset_num supersets of n/superset_num elements each; every superset holds 2 sorted sections of section_size.
		//Number of Threads = Size of Superset
		//Number of blocks = Number of supersets
		hipLaunchKernelGGL(( merge) , dim3(superset_num), dim3(n/superset_num), (n/superset_num) * sizeof(int) , 0, d_arr, section_size, d_out_temp); //dynamic shared memory size is given in bytes
//UPDATE : superset_num ( halved ) and section_size (doubled)
superset_num = superset_num/2;
section_size = section_size*2;
}
timer.Stop();
double time_elapsed = timer.Elapsed();
//---------------------------Copy Final Sorted Output From Device into a Host Array h_out[]------------------------------------------
int* output_array = (int*)malloc( n * sizeof(int));
hipMemcpy((void*)output_array, (void*)d_arr, n * sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
{
printf("%d ", output_array[i]);
//if (i == 9) cout << endl;
}
printf("\n Time Elapsed : %g ms", time_elapsed);
}
int main()
{
merge_sort();
}
| 4dc673cb7f830dae597efe376195643361ab6e91.cu | #ifndef __CUDACC__
#define __CUDACC__
#endif
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <cuda.h>
#include "device_functions.h"
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <time.h>
#include <windows.h>
#include <iostream>
//-------------------------------------------------------CPU TIMER LIBRARY-------------------------------------------------------
#if defined(_MSC_VER) || defined(_MSC_EXTENSIONS)
#define DELTA_EPOCH_IN_MICROSECS 116444736000000000Ui64 // CORRECT
#else
#define DELTA_EPOCH_IN_MICROSECS 116444736000000000ULL // CORRECT
#endif
struct timezone
{
int tz_minuteswest; /* minutes W of Greenwich */
int tz_dsttime; /* type of dst correction */
};
// Definition of a gettimeofday function
int gettimeofday(struct timeval* tv, struct timezone* tz)
{
// Define a structure to receive the current Windows filetime
FILETIME ft;
// Initialize the present time to 0 and the timezone to UTC
unsigned __int64 tmpres = 0;
static int tzflag = 0;
if (NULL != tv)
{
GetSystemTimeAsFileTime(&ft);
// The GetSystemTimeAsFileTime returns the number of 100 nanosecond
// intervals since Jan 1, 1601 in a structure. Copy the high bits to
// the 64 bit tmpres, shift it left by 32 then or in the low 32 bits.
tmpres |= ft.dwHighDateTime;
tmpres <<= 32;
tmpres |= ft.dwLowDateTime;
// Convert to microseconds by dividing by 10
tmpres /= 10;
// The Unix epoch starts on Jan 1 1970. Need to subtract the difference
// in seconds from Jan 1 1601.
tmpres -= DELTA_EPOCH_IN_MICROSECS;
// Finally change microseconds to seconds and place in the seconds value.
// The modulus picks up the microseconds.
tv->tv_sec = (long)(tmpres / 1000000UL);
tv->tv_usec = (long)(tmpres % 1000000UL);
}
if (NULL != tz)
{
if (!tzflag)
{
_tzset();
tzflag++;
}
// Adjust for the timezone west of Greenwich
tz->tz_minuteswest = _timezone / 60;
tz->tz_dsttime = _daylight;
}
return 0;
}
//--------------------------------------------------------GPU TIMER LIBRARY--------------------------------------------------------------------
struct GpuTimer
{
cudaEvent_t start;
cudaEvent_t stop;
GpuTimer()
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer()
{
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void Start()
{
cudaEventRecord(start, 0);
}
void Stop()
{
cudaEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
//-----------------------------------------------------------------------------------------------------------------------------
using namespace std;
//Device function which bubble sorts a specific section of arr[]. Section [start:end]. Both inclusive.
__device__ void bubblesort(int* arr, int start, int end)
{
int n = end - start + 1; //Length of array section from start to end
	int i, k, temp;
	for (k = 1; k < n; k++)   //n-1 bubbling passes over arr[start:end]
	{
for (i = start; i < (end - k + 1); i++)
{
if (arr[i] > arr[i + 1])
{
temp = arr[i]; //
arr[i] = arr[i + 1]; // Swapping A[i+1] and A[i]
arr[i + 1] = temp; //
}
}
}
}
//Entire block for the entire array. Each thread takes care of bubble-sorting an individual section of size : *section_length
__global__ void section_sort(int* nums, int section_size) //(, int n)
{
//The thread with thread index = idx will take care of nums[] from : [( section_size * idx ) to ( section_size * (idx + 1) - 1 )]
//For example: idx = 1 and section_size = 20, then, thread with idx = 1 will take care of nums[ 20: 39 ]
int idx = threadIdx.x;
//Bubble sort nums[] from index [ ( section_size * idx ) : ( section_size * (idx + 1) - 1 ) ]
bubblesort(nums, section_size * idx, (section_size * (idx + 1) - 1));
}
//----------------------------------------------------------------------------------------------------------------------------
//Device Function: searches the sorted array arr[] (of size n) and returns the index of the last element that is strictly less than target
//(-1 if no element is smaller, n-1 if every element is smaller; on an exact match it returns the index just before the match).
//Callers add 1 to this result to get the count of elements smaller than target.
__device__ int bin_search(int* arr, int target, int n) // n = size of arr
{
//Corner Cases : When the target is out of boundary of the range of values in the array
if (target < arr[0]) return -1;
if (target > arr[n - 1]) return n - 1;
	//left and right bounds of the search window
	int left = 0;
	int right = n - 1;
while (left <= right)
{
int mid = (left + right) / 2; //Calculate mid
if (arr[mid] == target)
{
return mid - 1; //Return index where nums[] == target
}
//All elements to right of mid are greater than target
else if (arr[mid] > target)
{
//If nums[mid-1] < target < nums[mid] ( Meaning target lies between nums[mid-1] and nums[mid] ==> (mid-1) is the required index)
if (arr[mid - 1] < target)
{
return (mid - 1);
}
else //Change the right border
{
right = mid - 1;
}
}
//All elements to left of mid are lesser than target
else if (arr[mid] < target)
{
//If nums[mid] < target < nums[mid+1] ( Meaning target lies between nums[mid] and nums[mid+1] )
if ((arr[mid + 1] > target))
{
return (mid);
}
else //Change the left border
{
left = mid + 1;
}
}
}
return -1;
}
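//Illustrative examples of bin_search (added commentary; values assumed distinct, as in the test data below):
//  arr = {2, 4, 6, 8}, n = 4
//  bin_search(arr, 5, 4) -> 1   (arr[1] = 4 is the last element smaller than 5)
//  bin_search(arr, 1, 4) -> -1  (no element is smaller than 1)
//  bin_search(arr, 9, 4) -> 3   (every element is smaller than 9)
//The merge kernel below adds 1 to this result, turning it into "how many elements of the other
//section are smaller than mine", which is exactly the offset needed for the scatter address.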
//Merges 2 sorted sections by computing scatter addresses in parallel:
// Each thread of the block produces the scatter address for its own element. The block covers 2 adjacent sorted sections.
// Scatter addresses for all elements of both sections are produced in parallel.
// The block is responsible for merging both of its sections.
// Finally, the block's portion of the array is rewritten in sorted order according to the scatter addresses.
// *arr = array pointer
// section_length = the length of each of the two sorted sections being merged
// *d_out_temp = temporary buffer where the merged output is staged
__global__ void merge(int* arr, int section_length, int* d_out_temp)
{
//int section_length = *section_size;
int superset_length = section_length * 2; //Block will be 2 * (size of 1 section). Because 2 sections are merged
int idx = threadIdx.x;
int b_idx = blockIdx.x;
//Length of arr1[] and arr2[] are section size
int len1 = section_length;
int len2 = section_length;
//-----Select *arr1 and *arr2 and *d_out_curr------------------------
int* arr1 = arr + (b_idx * superset_length);
int* arr2 = arr1 + (section_length);
int* d_out_curr = d_out_temp + (b_idx * superset_length); //Determine d_out_curr[], the output array for current merge
//Dynamically allocated shared memory array.
// scat_ad[] from index [0 to n1-1] is for arr1[].
//scat_ad[] from index [n1 to n2-1] is for arr2[]
//Create a shared memory of size n1+n2 to accomodate the scatter-addresses corresonding to each element in arr1[] and arr2[]
extern __shared__ int scat_ad[];
//--------------------------------These threads are responsible for arr1[]-------------------------------------------------------
if (idx <= len1 - 1)
{
int idx1 = idx; //Number of elements in arr1[] that are lesser than arr1[idx]. idx1 = index of current element in arr1[]
int target = arr1[idx1]; //Target is current element in arr1[]
//--------------Find idx2----------------------------------------Binary Search Part------------------------------
int idx2 = bin_search(arr2, target, len2) + 1; //Number of elements in arr2[] that are lesser than arr1[idx].....
//Calculate and store the scatter address in array
//scat_arr1[idx] = idx1 + idx2; //If there are 2 elements before a number in output array, its index will be 2
		scat_ad[idx] = idx1 + idx2; //Scatter address corresponding to arr1[idx] = idx1 + idx2
}
//--------------------------------------These threads are responsible for arr2[]--------------------------------------------
else if (idx >= len1)
{
//Number of elements in arr2[] that are lesser than arr2[idx].
//idx1 = index of current element in arr2[]
//(idx-len1) because threads with index n1 to n2-1 are responsible for arr2[] index [0: n2-1]
int idx1 = idx - len1;
int target = arr2[idx1]; //Target is current element in arr1[]
//--------------Find idx2-----------------------------Binary Search Part---------------------------
int idx2 = bin_search(arr1, target, len1) + 1; //Number of elements in arr1[] that are lesser than arr2[idx]. +1 bcos we want appropriate position for current element
//Calculate and store the scatter address in array
//scat_arr1[idx] = idx1 + idx2; //If there are 2 elements before a number in output array, its index will be 2
scat_ad[idx] = idx1 + idx2; //Scatter address corresponding to arr2[idx - len1] = idx1 + idx2
}
__syncthreads(); //Barrier to ensure that all threads have finished writing scat_ad[].------------------Not necessary
//-------------Store the output in respective position in d_out_temp[] using scatter address so that they are in sorted order-----------------------------------
/*
if (idx < len1)
{
d_out_curr[scat_ad[idx]] = arr1[idx];
}
else if (idx >= len1)
{
//d_out_curr[scat_ad[idx]] = arr2[idx - len1];
d_out_curr[scat_ad[idx]] = arr1[idx];
}
*/
d_out_curr[scat_ad[idx]] = arr1[idx];
__syncthreads();
//--------------------------------------Copy sorted elements back to array-----------------------------------------------------
arr1[idx] = d_out_curr[idx];
//printf( "%d ", arr1[idx] );
}
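//Worked example of the scatter addresses (added commentary, assuming distinct values):
//  arr1 = {1, 3, 5, 7}, arr2 = {2, 4, 6, 8}, section_length = 4
//  element 5 (arr1, idx1 = 2): bin_search(arr2, 5, 4) = 1, so idx2 = 2 and scat_ad = 2 + 2 = 4
//  element 4 (arr2, idx1 = 1): bin_search(arr1, 4, 4) = 1, so idx2 = 2 and scat_ad = 1 + 2 = 3
//Merged output: {1, 2, 3, 4, 5, 6, 7, 8}, with 5 landing at index 4 and 4 at index 3 as computed.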
//Makes kernel call to merge 2 sorted array:
//
void merge_sort()
{
GpuTimer timer;
//4 sections of 5 elements size
//int h_arr[] = { 120,119,118,117,116, 115,114,113,112,111, 110,109,108,107,106, 105,104,103,102,101 };
//int h_arr[] = { 596, 703, 277, 228, 548, 515, 213, 880, 391, 364, 224, 623, 845, 152, 454, 987, 854, 257, 402, 990, 996, 819, 756, 735, 460, 87, 693, 268, 92, 14, 860, 68, 996, 934, 478, 855, 209, 293, 171, 285 };
int h_arr[40] = { 100,99,98,97,96,95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,63,62,61 };
//int h_arr[80] = { 100,99,98,97,96,95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,31,30,29,28,27,26,25,24,23,22,21 };
//int h_arr[64] = { 100,99,98,97,96,95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38,37 };
int n = sizeof(h_arr) / sizeof(int); //n = Total size of host array
	int div_num = 4; //How many parts the array is initially split into.
int section_size = n/div_num; // section_size = Size of each section after splitting arr[] into div_num parts (Stored in Host)
//-----------------------------------Create input and output arrays in GPU---------------------------------------------
int* d_arr, * d_out_temp; // *d_out2;
cudaMalloc((void**)&d_arr, n * sizeof(int));
cudaMemcpy((void*)d_arr, (void*)h_arr, n * sizeof(int), cudaMemcpyHostToDevice); //d_arr[] is input array in device
cudaMalloc((void**)&d_out_temp, n * sizeof(int)); //d_out_temp[] is temporarily used to store sorted block elements
timer.Start();
//------------------------------Stage-1: KERNEL CALL: Bubble Sort Each Section of section_size elements------------------------------------
section_sort <<<1, div_num>>> (d_arr, section_size); //Call div_num threads: Each thread bubble-sorts a sub-section of n/div_num elements in the array.
/*
//---------------Stage-2 : KERNEL CALL: Perform 2 Parallel Merges on 2 Groups of 2 Sections (Each Section Of Size n/4)-----------------------------------------------
//Make kernel call to 2 blocks of n/2 threads each. Each thread is responsible for 1 element of its block. 3rd parameter n/2 is for shared memory size
//Imagine The entire arr[] is divided into 2 blocks of n/2 size each. Each block is divided into sections of section_size
	div_num = div_num / 2;		//Initially : Number of supersets will be Half of Total Number of Divisions (Here: 4/2 = 2)
	merge <<< div_num, n/div_num, n/div_num >>> ( d_arr, section_size, d_out_temp);
	//Number of Threads = Size of Superset (Group of 2 sections)
	//Number of blocks = Number of supersets
//---------------------------Stage-3 : KERNEL CALL: Perform 1 Merge On 2 Sections (Each of Size n/2)-------------------------------------------------------------------
//Make kernel call to 1 blocks of n threads . Each thread is responsible for 1 element of its block. 3rd parameter n is for shared memory size
//Entire arr[] is 1 block.
div_num = div_num / 2; //Number of supersets will be halved (Here : 4/2= 2)
section_size = section_size * 2; //Size of each section will double
merge <<< div_num, n/div_num, n/div_num >>> ( d_arr, section_size, d_out_temp); //Call kernel with INPUT: d_out_temp, and Output = d_out2[].... Section Length = 10
div_num = div_num / 2; //Number of supersets will be halved (Here : 2/2 = 1)
section_size = section_size * 2; //Size of each section will double
merge <<< div_num, n/div_num, n/div_num >>> (d_arr, section_size, d_out_temp); //Call kernel with INPUT: d_out_temp, and Output = d_out2[].... Section Length = 10
*/
//-----------------------------VERY IMPORTANT NOTE------------------------
//NOTE: SUPERSET = GROUP OF 2 SECTIONS. WHEN WE MERGE A SUPERSET, WE MERGE THE 2 SECTIONS OF THE SUPERSET TO PRODUCE A SORTED SUPERSET
//Initially, section_size = n / div_num
	int superset_num = div_num / 2; //The total number of supersets in the array, each of which is merged by a separate block. Initially, number of supersets = half of total number of divisions/sections in array
while (superset_num >= 1)
{
		//---------------Merge Stage : KERNEL CALL: merge the two sorted sections inside each superset, all supersets in parallel-----------------------------------------------
		//Make kernel call to superset_num blocks of n/superset_num threads each. Each thread is responsible for 1 element of its superset.
		//3rd launch parameter is the dynamic shared memory size (in bytes) for one superset's scatter addresses.
		//Imagine the entire arr[] divided into superset_num supersets of n/superset_num elements each; every superset holds 2 sorted sections of section_size.
		//Number of Threads = Size of Superset
		//Number of blocks = Number of supersets
		merge <<< superset_num, n/superset_num, (n/superset_num) * sizeof(int) >>> (d_arr, section_size, d_out_temp); //dynamic shared memory size is given in bytes
//UPDATE : superset_num ( halved ) and section_size (doubled)
superset_num = superset_num/2;
section_size = section_size*2;
}
timer.Stop();
double time_elapsed = timer.Elapsed();
//---------------------------Copy Final Sorted Output From Device into a Host Array h_out[]------------------------------------------
int* output_array = (int*)malloc( n * sizeof(int));
cudaMemcpy((void*)output_array, (void*)d_arr, n * sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
{
printf("%d ", output_array[i]);
//if (i == 9) cout << endl;
}
printf("\n Time Elapsed : %g ms", time_elapsed);
}
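//The launches above assume n is an even multiple of div_num, div_num is a power of two, and the
//final merge fits in a single block of n threads (at most 1024 on current NVIDIA GPUs) plus its
//dynamic shared memory. A minimal guard one could call before merge_sort() -- an illustrative
//sketch with a name of our choosing, not part of the original program:
static bool merge_sort_config_ok(int n, int div_num)
{
	if (n % div_num != 0) return false;      //sections must divide the array evenly
	if (n > 1024) return false;              //last merge uses one block of n threads
	int s = div_num;
	while (s > 1 && s % 2 == 0) s /= 2;      //div_num must be a power of two
	return s == 1;
}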
void main()
{
merge_sort();
}
|
f8f694eb3071eef1876de094f7c9ff314cd1439d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2016 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "rocblas.h"
#include "../debug.h"
/* macro for index calculations */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
/* matrix size and thread dimensions */
#define SIZE 1024
/* setup various hard-coded parameters for this kernel */
#define TBX 64 // Size of C this CTA is responsible for, x dimension
#define TBY 64 // Size of C this CTA is responsible for, y dimension
#define TX 16 // Thread block size, x dimension
#define TY 16 // Thread block size, y dimension
#define BK 16 // square block of K size
#define NX 4 // = TBX/TX == number of iterations to do TBX work with TX blocks
#define NY 4 // = TBY/TY == number of iterations to do TBY work with TY blocks
__global__ void GPU_shmem2(const int m, double const * const a, double const * const b, double *c )
{
/* setup some constants for later use */
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * TBY;
const int ibx = blockIdx.x * TBX;
/* shared memory arrays for A and B */
/* insert code for shared mem array sizes */
__shared__ double as[ FIXME ][ FIXME ];
__shared__ double bs[ FIXME ][ FIXME ];
/* space for C to be held in registers */
/* insert code for c_tmp size */
double c_tmp[ FIXME ][ FIXME ] ;
/* zero the temp C array */
#pragma unroll
/* complete the upper limit of the for loops */
for ( int i = 0 ; i < FIXME ; i++) {
for ( int j = 0 ; j < FIXME ; j++) {
c_tmp[i][j] = 0.0;
}
}
/* calculate my initial offset into A and B */
int aoff = INDX( ibx + tx, ty, m );
int boff = INDX( tx, iby + ty, m );
/* main loop over blocks of K */
for( int Kblock = 0; Kblock < m; Kblock+=BK )
{
/* read block of A into shared memory */
#pragma unroll
for ( int i = 0; i < NX ; i ++ )
{
/* complete the index into the arrays */
as[ FIXME ][ FIXME ] = a[ (aoff + FIXME) ];
}
/* read block of B into shared memory */
#pragma unroll
for ( int i = 0; i < NY ; i ++ )
{
/* complete the index into the arrays */
bs[ FIXME ][ FIXME ] = b[ (boff + FIXME) ];
}
/* increment A and B offsets for next round of data reads */
boff += BK;
aoff += m * BK;
/* triply nested loop to perform the matmult on the blocks */
#pragma unroll
/* insert code to complete the loop bounds for j and i */
for( int k = 0 ; k < BK ; k++ )
{
#pragma unroll
for (int j = 0 ; j < FIXME ; j++ )
{
#pragma unroll
for (int i = 0 ; i < FIXME ; i++ )
{
/* insert code to complete the matrix multiply */
c_tmp[ i ][ j ] += as[ tx + TX*i ][ k ] * bs[ k ][ ty + j*TY ];
}
}
}
} /* end for Kblock */
/* set coff to its proper index in the C matrix */
/* insert code to set coff to its proper location in the C matrix */
int coff = INDX( FIXME, FIXME, m );
/* write results to the C matrix */
#pragma unroll
for ( int j = 0 ; j < FIXME ; j++ )
{
#pragma unroll
for ( int i = 0 ; i < FIXME ; i++ )
{
/* insert code to write c_tmp elements to the global C matrix */
c[ coff + INDX( FIXME, FIXME, m )] = c_tmp[FIXME][FIXME];
}
}
} /* end GPU_shmem2 */
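/* One self-consistent way to resolve the FIXME placeholders above (an illustrative sketch only,
   not the official solution to this exercise):
     __shared__ double as[TBX][BK];  __shared__ double bs[BK][TBY];  double c_tmp[NX][NY];
     zero / accumulate / write loops run over i < NX and j < NY;
     load A:  as[ tx + i*TX ][ ty ]   = a[ aoff + i*TX ];
     load B:  bs[ tx ][ ty + i*TY ]   = b[ boff + i*TY*m ];
     C base:  int coff = INDX( ibx + tx, iby + ty, m );
     store:   c[ coff + INDX( i*TX, j*TY, m ) ] = c_tmp[i][j];
   With these choices each thread owns an NX-by-NY sub-tile of the TBX-by-TBY block of C, matching
   the inner product  as[tx + TX*i][k] * bs[k][ty + j*TY]  already given in the kernel. */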
int main( int argc, char *argv[] )
{
/* get GPU device number and name */
int dev;
hipDeviceProp_t deviceProp;
checkCUDA( hipGetDevice( &dev ) );
checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
const int size = SIZE;
fprintf(stdout, "Matrix size is %d\n",size);
double *h_a, *h_b, *h_c, *h_c1;
double *d_a, *d_b, *d_c;
size_t numbytes = (size_t ) size * (size_t ) size * sizeof( double );
h_a = (double *) malloc( numbytes );
if( h_a == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_b = (double *) malloc( numbytes );
if( h_b == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_c = (double *) malloc( numbytes );
if( h_c == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_c1 = (double *) malloc( numbytes );
if( h_c1 == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
/* zero out the host memory for C matrices */
memset( h_c, 0, numbytes );
memset( h_c1, 0, numbytes );
fprintf( stdout, "Total memory required is %lf MB\n",
3.0 * (double) numbytes / 1000000.0 );
/* initialize the A and B matrices */
for( int i = 0; i < size * size; i++ )
{
h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
}
/* allocate a, b, c in gpu memory */
checkCUDA( hipMalloc( (void **)&d_a, numbytes ) );
checkCUDA( hipMalloc( (void **)&d_b, numbytes ) );
checkCUDA( hipMalloc( (void **)&d_c, numbytes ) );
/* copy a and b to device */
checkCUDA( hipMemcpy( d_a, h_a, numbytes, hipMemcpyHostToDevice ) );
checkCUDA( hipMemcpy( d_b, h_b, numbytes, hipMemcpyHostToDevice ) );
hipblasHandle_t handle;
checkCUBLAS( hipblasCreate( &handle ) );
double alpha = 1.0;
double beta = 0.0;
/* start timers */
hipEvent_t start, stop;
checkCUDA( hipEventCreate( &start ) );
checkCUDA( hipEventCreate( &stop ) );
checkCUDA( hipEventRecord( start, 0 ) );
/* call CUBLAS dgemm */
checkCUBLAS(
hipblasDgemm( handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
size, size, size,
&alpha,
d_a, size,
d_b, size,
&beta,
d_c, size )
);
/* stop timers */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
/* print GPU CUBLAS timing information */
fprintf(stdout, "Total time GPU CUBLAS is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy C from device to host for error checking */
checkCUDA( hipMemcpy( h_c, d_c, numbytes, hipMemcpyDeviceToHost ) );
/* reset C on device to zero */
checkCUDA( hipMemset( d_c, 0, numbytes ) );
/* setup grid and block sizes */
dim3 threads( TX, TY, 1 );
dim3 blocks( size / ( TBX ), size / ( TBY ), 1 );
/* call GPU_naive */
printf("block.X %d block.Y %d\n",blocks.x, blocks.y );
printf("threads.x %d threads.y %d\n",threads.x, threads.y );
/* start timers */
checkCUDA( hipEventRecord( start, 0 ) );
/* call the kernel */
hipLaunchKernelGGL(( GPU_shmem2), dim3(blocks), dim3(threads) , 0, 0, size, d_a, d_b, d_c );
checkKERNEL()
/* stop timers */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
elapsedTime = 0.0f;
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
/* print data for GPU naive */
fprintf(stdout, "Total time GPU SHMEM is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy C back to host */
checkCUDA( hipMemcpy( h_c1, d_c, numbytes, hipMemcpyDeviceToHost ) );
checkCUBLAS( hipblasDestroy( handle ) );
checkCUDA( hipEventDestroy( start ) );
checkCUDA( hipEventDestroy( stop ) );
/* check CUBLAS versus GPU NAIVE numerical results */
double temp = 0.0;
for( int i = 0; i < size * size; i++ )
{
temp += ( h_c[i] - h_c1[i] ) * ( h_c[i] - h_c1[i] );
} /* end for */
printf("error is %f\n",temp);
if( temp > 10 ) printf("FAIL\n");
else printf("PASS\n");
/* cleanup */
checkCUDA( hipFree( d_a ) );
checkCUDA( hipFree( d_b ) );
checkCUDA( hipFree( d_c ) );
free( h_a );
free( h_b );
free( h_c );
free( h_c1 );
checkCUDA( hipDeviceReset() );
return 0;
}
| f8f694eb3071eef1876de094f7c9ff314cd1439d.cu | /*
* Copyright 2016 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "cublas_v2.h"
#include "../debug.h"
/* macro for index calculations */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
/* matrix size and thread dimensions */
#define SIZE 1024
/* setup various hard-coded parameters for this kernel */
#define TBX 64 // Size of C this CTA is responsible for, x dimension
#define TBY 64 // Size of C this CTA is responsible for, y dimension
#define TX 16 // Thread block size, x dimension
#define TY 16 // Thread block size, y dimension
#define BK 16 // square block of K size
#define NX 4 // = TBX/TX == number of iterations to do TBX work with TX blocks
#define NY 4 // = TBY/TY == number of iterations to do TBY work with TY blocks
__global__ void GPU_shmem2(const int m, double const * const a, double const * const b, double *c )
{
/* setup some constants for later use */
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * TBY;
const int ibx = blockIdx.x * TBX;
/* shared memory arrays for A and B */
/* insert code for shared mem array sizes */
__shared__ double as[ FIXME ][ FIXME ];
__shared__ double bs[ FIXME ][ FIXME ];
/* space for C to be held in registers */
/* insert code for c_tmp size */
double c_tmp[ FIXME ][ FIXME ] ;
/* zero the temp C array */
#pragma unroll
/* complete the upper limit of the for loops */
for ( int i = 0 ; i < FIXME ; i++) {
for ( int j = 0 ; j < FIXME ; j++) {
c_tmp[i][j] = 0.0;
}
}
/* calculate my initial offset into A and B */
int aoff = INDX( ibx + tx, ty, m );
int boff = INDX( tx, iby + ty, m );
/* main loop over blocks of K */
for( int Kblock = 0; Kblock < m; Kblock+=BK )
{
/* read block of A into shared memory */
#pragma unroll
for ( int i = 0; i < NX ; i ++ )
{
/* complete the index into the arrays */
as[ FIXME ][ FIXME ] = a[ (aoff + FIXME) ];
}
/* read block of B into shared memory */
#pragma unroll
for ( int i = 0; i < NY ; i ++ )
{
/* complete the index into the arrays */
bs[ FIXME ][ FIXME ] = b[ (boff + FIXME) ];
}
/* increment A and B offsets for next round of data reads */
boff += BK;
aoff += m * BK;
/* triply nested loop to perform the matmult on the blocks */
#pragma unroll
/* insert code to complete the loop bounds for j and i */
for( int k = 0 ; k < BK ; k++ )
{
#pragma unroll
for (int j = 0 ; j < FIXME ; j++ )
{
#pragma unroll
for (int i = 0 ; i < FIXME ; i++ )
{
/* insert code to complete the matrix multiply */
c_tmp[ i ][ j ] += as[ tx + TX*i ][ k ] * bs[ k ][ ty + j*TY ];
}
}
}
} /* end for Kblock */
/* set coff to its proper index in the C matrix */
/* insert code to set coff to its proper location in the C matrix */
int coff = INDX( FIXME, FIXME, m );
/* write results to the C matrix */
#pragma unroll
for ( int j = 0 ; j < FIXME ; j++ )
{
#pragma unroll
for ( int i = 0 ; i < FIXME ; i++ )
{
/* insert code to write c_tmp elements to the global C matrix */
c[ coff + INDX( FIXME, FIXME, m )] = c_tmp[FIXME][FIXME];
}
}
} /* end GPU_shmem2 */
int main( int argc, char *argv[] )
{
/* get GPU device number and name */
int dev;
cudaDeviceProp deviceProp;
checkCUDA( cudaGetDevice( &dev ) );
checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
const int size = SIZE;
fprintf(stdout, "Matrix size is %d\n",size);
double *h_a, *h_b, *h_c, *h_c1;
double *d_a, *d_b, *d_c;
size_t numbytes = (size_t ) size * (size_t ) size * sizeof( double );
h_a = (double *) malloc( numbytes );
if( h_a == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_b = (double *) malloc( numbytes );
if( h_b == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_c = (double *) malloc( numbytes );
if( h_c == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_c1 = (double *) malloc( numbytes );
if( h_c1 == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
/* zero out the host memory for C matrices */
memset( h_c, 0, numbytes );
memset( h_c1, 0, numbytes );
fprintf( stdout, "Total memory required is %lf MB\n",
3.0 * (double) numbytes / 1000000.0 );
/* initialize the A and B matrices */
for( int i = 0; i < size * size; i++ )
{
h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
}
/* allocate a, b, c in gpu memory */
checkCUDA( cudaMalloc( (void **)&d_a, numbytes ) );
checkCUDA( cudaMalloc( (void **)&d_b, numbytes ) );
checkCUDA( cudaMalloc( (void **)&d_c, numbytes ) );
/* copy a and b to device */
checkCUDA( cudaMemcpy( d_a, h_a, numbytes, cudaMemcpyHostToDevice ) );
checkCUDA( cudaMemcpy( d_b, h_b, numbytes, cudaMemcpyHostToDevice ) );
cublasHandle_t handle;
checkCUBLAS( cublasCreate( &handle ) );
double alpha = 1.0;
double beta = 0.0;
/* start timers */
cudaEvent_t start, stop;
checkCUDA( cudaEventCreate( &start ) );
checkCUDA( cudaEventCreate( &stop ) );
checkCUDA( cudaEventRecord( start, 0 ) );
/* call CUBLAS dgemm */
checkCUBLAS(
cublasDgemm( handle, CUBLAS_OP_N, CUBLAS_OP_N,
size, size, size,
&alpha,
d_a, size,
d_b, size,
&beta,
d_c, size )
);
/* stop timers */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
/* print GPU CUBLAS timing information */
fprintf(stdout, "Total time GPU CUBLAS is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy C from device to host for error checking */
checkCUDA( cudaMemcpy( h_c, d_c, numbytes, cudaMemcpyDeviceToHost ) );
/* reset C on device to zero */
checkCUDA( cudaMemset( d_c, 0, numbytes ) );
/* setup grid and block sizes */
dim3 threads( TX, TY, 1 );
dim3 blocks( size / ( TBX ), size / ( TBY ), 1 );
/* call GPU_naive */
printf("block.X %d block.Y %d\n",blocks.x, blocks.y );
printf("threads.x %d threads.y %d\n",threads.x, threads.y );
/* start timers */
checkCUDA( cudaEventRecord( start, 0 ) );
/* call the kernel */
GPU_shmem2<<< blocks, threads >>> ( size, d_a, d_b, d_c );
checkKERNEL()
/* stop timers */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
elapsedTime = 0.0f;
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
/* print data for GPU naive */
fprintf(stdout, "Total time GPU SHMEM is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy C back to host */
checkCUDA( cudaMemcpy( h_c1, d_c, numbytes, cudaMemcpyDeviceToHost ) );
checkCUBLAS( cublasDestroy( handle ) );
checkCUDA( cudaEventDestroy( start ) );
checkCUDA( cudaEventDestroy( stop ) );
/* check CUBLAS versus GPU NAIVE numerical results */
double temp = 0.0;
for( int i = 0; i < size * size; i++ )
{
temp += ( h_c[i] - h_c1[i] ) * ( h_c[i] - h_c1[i] );
} /* end for */
printf("error is %f\n",temp);
if( temp > 10 ) printf("FAIL\n");
else printf("PASS\n");
/* cleanup */
checkCUDA( cudaFree( d_a ) );
checkCUDA( cudaFree( d_b ) );
checkCUDA( cudaFree( d_c ) );
free( h_a );
free( h_b );
free( h_c );
free( h_c1 );
checkCUDA( cudaDeviceReset() );
return 0;
}
|
a6fe85a97eb0eeefb28567112451834ec160088e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/FusedRNNKernel.cu"
#else
#include <cstdarg>
#include "../common.h"
#define DATATYPE TensorUtils<THCTensor>::DataType
//factor will be 3 for GRU and 4 for LSTM
void THNN_(FusedRNNAssertSizes)(THCState *state, int factor, int count, ...)
{
va_list list;
va_start(list, count);
THCTensor *input = va_arg(list, THCTensor*);
THCTensor *hidden = va_arg(list, THCTensor*);
THArgCheck(THCTensor_(nElement)(state, input) ==
THCTensor_(nElement)(state, hidden),
3, "Input and Hidden tensor sizes should be the same.");
THAssertMsg(TensorUtils<THCTensor>::getDims(state, input) <= MAX_CUTORCH_DIMS,
"Tensor dimension is too large.");
THAssertMsg(TensorUtils<THCTensor>::getDims(state, hidden) <= MAX_CUTORCH_DIMS,
"Tensor dimension is too large.");
for (int arg=2; arg < count; ++arg){
THCTensor *tens = va_arg(list, THCTensor*);
THArgCheck(THCTensor_(nElement)(state, input) ==
THCTensor_(nElement)(state, tens)*factor,
3, "A pointwise tensor was not the right size, should have 1/%u the elements of input/hidden tensor.", arg, factor);
THAssertMsg(TensorUtils<THCTensor>::getDims(state, tens) <= MAX_CUTORCH_DIMS,
"Tensor dimension is too large.");
}
va_end(list);
}
int THNN_(minIndexType)(THCState *state, int count, ...)
{
va_list list;
va_start(list, count);
THCTensor* tens = va_arg(list, THCTensor*);
int startDim = TensorUtils<THCTensor>::getDims(state, tens);
bool canCollapse = THCTensor_(isContiguous)(state,tens);
for (int arg=1; arg < count; ++arg){
tens = va_arg(list, THCTensor*);
canCollapse = canCollapse && THCTensor_(isContiguous)(state, tens);
if(TensorUtils<THCTensor>::getDims(state, tens) != startDim){
va_end(list);
return -1;
}
}
va_end(list);
if(canCollapse) return -2;
return startDim;
}
bool THNN_(canUse32BitIndexMath)(THCState *state, int count, ...)
{
va_list list;
va_start(list, count);
for (int arg=0; arg < count; ++arg){
THCTensor *tens = va_arg(list, THCTensor*);
if (!TensorUtils<THCTensor>::canUse32BitIndexMath(state, tens)){
va_end(list);
return false;
}
}
va_end(list);
return true;
}
#define DEVICE_LINEAR_GET(D_TENSOR, INDEX) \
D_TENSOR.data[IndexToOffset<T, IndexType, Dims>::get(INDEX, D_TENSOR)]
#define H2F(input) __half2float(input)
#define F2H(input) __float2half(input)
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(GRUForward)(TensorInfo<T, IndexType> Input,
TensorInfo<T, IndexType> Hidden,
TensorInfo<T, IndexType> Bias1,
TensorInfo<T, IndexType> Bias2,
TensorInfo<T, IndexType> _hx,
TensorInfo<T, IndexType> _hy,
TensorInfo<T, IndexType> storage,
IndexType hsz,
IndexType totalElements)
{
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x)
{
IndexType offset = (linearIndex/hsz)*3*hsz+linearIndex%hsz;
T ir = DEVICE_LINEAR_GET(Input, offset+0*hsz);
T ii = DEVICE_LINEAR_GET(Input, offset+1*hsz);
T in = DEVICE_LINEAR_GET(Input, offset+2*hsz);
T hr = DEVICE_LINEAR_GET(Hidden,offset+0*hsz);
T hi = DEVICE_LINEAR_GET(Hidden,offset+1*hsz);
T hn = DEVICE_LINEAR_GET(Hidden, offset+2*hsz);
T hx = DEVICE_LINEAR_GET(_hx, linearIndex);
T* hy = &DEVICE_LINEAR_GET(_hy, linearIndex);
bool has_bias = (Bias1.data != NULL);
T b1r, b1i, b1n, b2r, b2i, b2n;
if(has_bias){
b1r = DEVICE_LINEAR_GET(Bias1, linearIndex%hsz+0*hsz);
b1i = DEVICE_LINEAR_GET(Bias1, linearIndex%hsz+1*hsz);
b1n = DEVICE_LINEAR_GET(Bias1, linearIndex%hsz+2*hsz);
b2r = DEVICE_LINEAR_GET(Bias2, linearIndex%hsz+0*hsz);
b2i = DEVICE_LINEAR_GET(Bias2, linearIndex%hsz+1*hsz);
b2n = DEVICE_LINEAR_GET(Bias2, linearIndex%hsz+2*hsz);
}else{
#ifndef THC_REAL_IS_HALF
b1r = 0.0; b1i = 0.0; b1n = 0.0;
b2r = 0.0; b2i = 0.0; b2n = 0.0;
#else
b1r = F2H(0.0); b1i = F2H(0.0); b1n = F2H(0.0);
b2r = F2H(0.0); b2i = F2H(0.0); b2n = F2H(0.0);
#endif
}
offset = (linearIndex/hsz)*5*hsz+linearIndex%hsz;
#ifndef THC_REAL_IS_HALF
T rg, ig, ng;
rg = ir + hr + b1r + b2r;
ig = ii + hi + b1i + b2i;
TensorSigmoidOp<real>()(&rg, &rg);
TensorSigmoidOp<real>()(&ig, &ig);
ng = in + b1n + rg * (hn + b2n);
ng = THCNumerics<T>::tanh(ng);
*hy = ng + ig * (hx - ng);
//SAVE FOR BACKWARDS
DEVICE_LINEAR_GET(storage, offset+0*hsz) = rg;
DEVICE_LINEAR_GET(storage, offset+1*hsz) = ig;
DEVICE_LINEAR_GET(storage, offset+2*hsz) = ng;
DEVICE_LINEAR_GET(storage, offset+3*hsz) = hx;
DEVICE_LINEAR_GET(storage, offset+4*hsz) = hn + b2n;
#else
float rg, ig, ng;
rg = H2F(ir) + H2F(hr) + H2F(b1r) + H2F(b2r);
ig = H2F(ii) + H2F(hi) + H2F(b1i) + H2F(b2i);
TensorSigmoidOp<float>()(&rg, &rg);
TensorSigmoidOp<float>()(&ig, &ig);
ng = H2F(in) + H2F(b1n) + rg*( H2F(hn)+H2F(b2n) );
ng = THCNumerics<float>::tanh(ng);
*hy = F2H( ng + ig * ( H2F(hx)-ng ) );
//SAVE FOR BACKWARDS
DEVICE_LINEAR_GET(storage, offset+0*hsz) = F2H(rg);
DEVICE_LINEAR_GET(storage, offset+1*hsz) = F2H(ig);
DEVICE_LINEAR_GET(storage, offset+2*hsz) = F2H(ng);
DEVICE_LINEAR_GET(storage, offset+3*hsz) = hx;
DEVICE_LINEAR_GET(storage, offset+4*hsz) = F2H(H2F(hn) + H2F(b2n));
#endif
}
}
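/* Added commentary (not in the original source): in the notation above the kernel evaluates the
   standard GRU cell on precomputed gate pre-activations (Input = W_i x, Hidden = W_h h, plus biases):
       r  = sigmoid(i_r + h_r + b1r + b2r)                 -> rg
       z  = sigmoid(i_z + h_z + b1i + b2i)                 -> ig
       n  = tanh  (i_n + b1n + r * (h_n + b2n))            -> ng
       h' = n + z * (h - n)    ( == (1 - z) * n + z * h )  -> *hy
   and stashes rg, ig, ng, hx and (h_n + b2n) in `storage` for the backward pass. */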
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(GRUBackward)(TensorInfo<T, IndexType> gradInInput,
TensorInfo<T, IndexType> gradInHidden,
TensorInfo<T, IndexType> gradOutput,
TensorInfo<T, IndexType> gradInputHx,
TensorInfo<T, IndexType> storage,
IndexType hsz,
IndexType totalElements)
{
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x) {
IndexType offset = (linearIndex/hsz)*5*hsz+linearIndex%hsz;
T rg = DEVICE_LINEAR_GET(storage, offset+0*hsz);
T ig = DEVICE_LINEAR_GET(storage, offset+1*hsz);
T ng = DEVICE_LINEAR_GET(storage, offset+2*hsz);
T hx = DEVICE_LINEAR_GET(storage, offset+3*hsz);
T hn = DEVICE_LINEAR_GET(storage, offset+4*hsz);
T go = DEVICE_LINEAR_GET(gradOutput, linearIndex);
offset = (linearIndex/hsz)*3*hsz+linearIndex%hsz;
#ifndef THC_REAL_IS_HALF
T gig = go*(hx-ng)*(1-ig)*(ig);
T ghx = go*(ig);
T gin = go*(1-ig)*(1-ng*ng);
T ghn = gin *rg;
T grg = gin*hn*(1-rg)*rg;
DEVICE_LINEAR_GET(gradInputHx, linearIndex) = ghx;
DEVICE_LINEAR_GET(gradInInput, offset+0*hsz) = grg;
DEVICE_LINEAR_GET(gradInInput, offset+1*hsz) = gig;
DEVICE_LINEAR_GET(gradInInput, offset+2*hsz) = gin;
DEVICE_LINEAR_GET(gradInHidden, offset+0*hsz) = grg;
DEVICE_LINEAR_GET(gradInHidden, offset+1*hsz) = gig;
DEVICE_LINEAR_GET(gradInHidden, offset+2*hsz) = ghn;
#else
float gig = H2F(go)*( H2F(hx)-H2F(ng) )*( 1-H2F(ig) )*H2F(ig);
float ghx = H2F(go)*H2F(ig);
float gin = H2F(go)*( 1-H2F(ig) )*( 1-H2F(ng)*H2F(ng) );
float ghn = H2F(gin) * H2F(rg);
float grg = H2F(gin)*H2F(hn)*( 1-H2F(rg) )*H2F(rg);
DEVICE_LINEAR_GET(gradInInput, offset+0*hsz) = F2H(grg);
DEVICE_LINEAR_GET(gradInInput, offset+1*hsz) = F2H(gig);
DEVICE_LINEAR_GET(gradInInput, offset+2*hsz) = F2H(gin);
DEVICE_LINEAR_GET(gradInHidden, offset+0*hsz) = F2H(grg);
DEVICE_LINEAR_GET(gradInHidden, offset+1*hsz) = F2H(gig);
DEVICE_LINEAR_GET(gradInHidden, offset+2*hsz) = F2H(ghn);
DEVICE_LINEAR_GET(gradInputHx, linearIndex) = F2H(ghx);
#endif
}
}
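/* Added commentary: with go = dL/dh' and the forward relation h' = n + z*(h - n), the chain rule gives
       dL/dz_pre = go * (h - n) * z * (1 - z)                    -> gig
       dL/dh     = go * z                                        -> ghx
       dL/dn_pre = go * (1 - z) * (1 - n^2)                      -> gin
       dL/d(h_n + b2n)  = dL/dn_pre * r                          -> ghn
       dL/dr_pre = dL/dn_pre * (h_n + b2n) * r * (1 - r)         -> grg
   which is exactly what the kernel writes into gradInInput, gradInHidden and gradInputHx. */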
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(LSTMForward)(TensorInfo<T, IndexType> input,
TensorInfo<T, IndexType> hidden,
TensorInfo<T, IndexType> bias1,
TensorInfo<T, IndexType> bias2,
TensorInfo<T, IndexType> _cx,
TensorInfo<T, IndexType> _hy,
TensorInfo<T, IndexType> _cy,
IndexType hsz,
IndexType totalElements)
{
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x)
{
IndexType offset = (linearIndex/hsz)*4*hsz+linearIndex%hsz;
T* iig = &DEVICE_LINEAR_GET(input, offset+0*hsz);
T* ifg = &DEVICE_LINEAR_GET(input, offset+1*hsz);
T* icg = &DEVICE_LINEAR_GET(input, offset+2*hsz);
T* iog = &DEVICE_LINEAR_GET(input, offset+3*hsz);
T hig = DEVICE_LINEAR_GET(hidden, offset+0*hsz);
T hfg = DEVICE_LINEAR_GET(hidden, offset+1*hsz);
T hcg = DEVICE_LINEAR_GET(hidden, offset+2*hsz);
T hog = DEVICE_LINEAR_GET(hidden, offset+3*hsz);
T cx = DEVICE_LINEAR_GET(_cx, linearIndex);
T* hy = &DEVICE_LINEAR_GET(_hy, linearIndex);
T* cy = &DEVICE_LINEAR_GET(_cy, linearIndex);
bool has_bias = (bias1.data != NULL);
T b1i, b1f, b1c, b1o;
T b2i, b2f, b2c, b2o;
if(has_bias){
b1i = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+0*hsz);
b1f = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+1*hsz);
b1c = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+2*hsz);
b1o = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+3*hsz);
b2i = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+0*hsz);
b2f = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+1*hsz);
b2c = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+2*hsz);
b2o = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+3*hsz);
}else{
#ifndef THC_REAL_IS_HALF
b1i = 0.0; b1f = 0.0; b1c = 0.0; b1o = 0.0;
b2i = 0.0; b2f = 0.0; b2c = 0.0; b2o = 0.0;
#else
b1i = F2H(0.0); b1f = F2H(0.0); b1c = F2H(0.0); b1o = F2H(0.0);
b2i = F2H(0.0); b2f = F2H(0.0); b2c = F2H(0.0); b2o = F2H(0.0);
#endif
}
#ifndef THC_REAL_IS_HALF
T ig, fg, cg, og;
ig = *iig + hig + b1i + b2i;
fg = *ifg + hfg + b1f + b2f;
cg = *icg + hcg + b1c + b2c;
og = *iog + hog + b1o + b2o;
TensorSigmoidOp<real>()(&ig, &ig);
TensorSigmoidOp<real>()(&fg, &fg);
cg = THCNumerics<T>::tanh(cg);
TensorSigmoidOp<real>()(&og, &og);
*cy = (fg * cx) + (ig * cg);
*hy = og * THCNumerics<T>::tanh(*cy);
*iig = ig;
*ifg = fg;
*icg = cg;
*iog = og;
#else
float ig, fg, cg, og;
float f_hy, f_cy;
ig = H2F(*iig) + H2F(hig) + H2F(b1i) + H2F(b2i);
fg = H2F(*ifg) + H2F(hfg) + H2F(b1f) + H2F(b2f);
cg = H2F(*icg) + H2F(hcg) + H2F(b1c) + H2F(b2c);
og = H2F(*iog) + H2F(hog) + H2F(b1o) + H2F(b2o);
TensorSigmoidOp<float>()(&ig, &ig);
TensorSigmoidOp<float>()(&fg, &fg);
cg = THCNumerics<float>::tanh(cg);
TensorSigmoidOp<float>()(&og, &og);
f_cy = (fg * H2F(cx) ) + (ig * cg);
f_hy = og * THCNumerics<float>::tanh(f_cy);
*hy = F2H(f_hy);
*cy = F2H(f_cy);
//SAVE FOR BACKWARDS
//Also need cy and cx but can be saved easily in python
*iig = F2H(ig);
*ifg = F2H(fg);
*icg = F2H(cg);
*iog = F2H(og);
#endif
}
}
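/* Added commentary: the kernel above evaluates the standard LSTM cell on precomputed gate
   pre-activations (input = W_i x, hidden = W_h h, plus the two bias vectors):
       i = sigmoid(.)   f = sigmoid(.)   g = tanh(.)   o = sigmoid(.)
       c' = f * c + i * g
       h' = o * tanh(c')
   It overwrites the input gate buffer with the post-nonlinearity i, f, g, o so the backward
   kernel can reuse them (c and c' are saved separately by the caller). */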
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(LSTMBackward)(TensorInfo<T, IndexType> storage,
TensorInfo<T, IndexType> gradInGates,
TensorInfo<T, IndexType> _cx,
TensorInfo<T, IndexType> _cy,
TensorInfo<T, IndexType> gradoutput,
TensorInfo<T, IndexType> gradoutputcell,
TensorInfo<T, IndexType> gradInputCx,
IndexType hsz,
IndexType totalElements)
{
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x) {
IndexType offset = (linearIndex/hsz)*4*hsz+linearIndex%hsz;
T ig = DEVICE_LINEAR_GET(storage, offset+0*hsz);
T fg = DEVICE_LINEAR_GET(storage, offset+1*hsz);
T cg = DEVICE_LINEAR_GET(storage, offset+2*hsz);
T og = DEVICE_LINEAR_GET(storage, offset+3*hsz);
T* ih = &DEVICE_LINEAR_GET(gradInGates, offset+0*hsz);
T* fh = &DEVICE_LINEAR_GET(gradInGates, offset+1*hsz);
T* ch = &DEVICE_LINEAR_GET(gradInGates, offset+2*hsz);
T* oh = &DEVICE_LINEAR_GET(gradInGates, offset+3*hsz);
//will return hidden grads here
T cx = DEVICE_LINEAR_GET(_cx, linearIndex);
T cy = DEVICE_LINEAR_GET(_cy, linearIndex);
T* gi = &DEVICE_LINEAR_GET(gradInputCx, linearIndex);
T go = DEVICE_LINEAR_GET(gradoutput, linearIndex);
T goc= DEVICE_LINEAR_GET(gradoutputcell, linearIndex);
#ifndef THC_REAL_IS_HALF
T gcx = THCNumerics<T>::tanh(cy);
T gog = go * gcx;
gcx = go * og * ( 1 - gcx*gcx) + goc;
T gig = gcx * cg;
T gfg = gcx * cx;
T gcg = gcx * ig;
gcx = gcx * fg;
gig = gig * (1-ig) * ig;
gfg = gfg * (1-fg) * fg;
gcg = gcg * (1-cg*cg);
gog = gog * (1-og) * og;
*ih = gig;
*fh = gfg;
*ch = gcg;
*oh = gog;
*gi = gcx;
#else
float gcx = THCNumerics<float>::tanh(H2F(cy));
float gog = H2F(go) * gcx;
gcx = H2F(go) * H2F(og) * ( 1 - gcx*gcx) + H2F(goc);
float gcg = gcx * H2F(fg);
float gfg = gcx * H2F(cg);
float gig = gcx * H2F(cx);
gog = gog * ( (1-H2F(og))*H2F(og) );
gcg = gcg * (1-H2F(cg)*H2F(cg));
gfg = gfg * ( (1-H2F(fg))*H2F(fg) );
gig = gig * ( (1-H2F(ig))*H2F(ig) );
*ih = F2H(gig);
*fh = F2H(gfg);
*ch = F2H(gcg);
*oh = F2H(gog);
*gi = F2H(gcx);
#endif
}
}
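/* Added commentary: with go = dL/dh', goc = dL/dc' arriving from later timesteps, and the forward
   relations h' = o * tanh(c'), c' = f*c + i*g, the kernel computes
       do_pre = go * tanh(c') * o * (1 - o)            -> *oh
       dc'    = go * o * (1 - tanh(c')^2) + goc
       di_pre = dc' * g * i * (1 - i)                  -> *ih
       df_pre = dc' * c * f * (1 - f)                  -> *fh
       dg_pre = dc' * i * (1 - g^2)                    -> *ch
       dc     = dc' * f                                -> *gi
   i.e. the gate gradients go back into gradInGates and the cell-state gradient into gradInputCx. */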
// ************ START Create function calls ********** //
#define FILL_FUNCTION(ITYPE, DIM, FUNCTION) FUNCTION(ITYPE, DIM)
#define FILL_DIM(ITYPE, DIM, FUNCTION) \
switch (DIM) { \
case -2: \
FILL_FUNCTION(ITYPE, -2, FUNCTION); \
break; \
case 1: \
FILL_FUNCTION(ITYPE, 1, FUNCTION); \
break; \
case 2: \
FILL_FUNCTION(ITYPE, 2, FUNCTION); \
break; \
default: \
FILL_FUNCTION(ITYPE, -1, FUNCTION); \
break; \
}
#define LSTM_FORWARD(ITYPE, DIM)hipLaunchKernelGGL(( THNN_(LSTMForward) \
<DATATYPE, ITYPE, DIM>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
inputI, hiddenI, \
bias1I, bias2I, cxI, hyI, cyI, \
hid_size, totalElements);
#define LSTM_BACKWARD(ITYPE, DIM)hipLaunchKernelGGL(( THNN_(LSTMBackward) \
<DATATYPE, ITYPE, DIM>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
storageI, gradingatesI, cxI, cyI, \
gradoutI, gradoutcI, gradincxI, \
hid_size, totalElements);
#define GRU_FORWARD(ITYPE, DIM)hipLaunchKernelGGL(( THNN_(GRUForward)<DATATYPE, ITYPE, DIM>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
inputI, hiddenI, bias1I, bias2I, hxI, hyI, storageI, \
hid_size, totalElements);
#define GRU_BACKWARD(ITYPE, DIM)hipLaunchKernelGGL(( THNN_(GRUBackward) \
<DATATYPE, ITYPE, DIM>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
gradininputI, gradinhiddenI, gradoutI, gradinhxI, storageI, \
hid_size, totalElements);
// ************ END Create actual function calls ************ //
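// Added commentary: FILL_DIM simply dispatches on the (possibly collapsed) tensor dimensionality.
// For example, FILL_DIM(uint32_t, -2, LSTM_FORWARD) expands (roughly) to a single
//   hipLaunchKernelGGL((THNN_(LSTMForward)<DATATYPE, uint32_t, -2>), dim3(grid), dim3(block), 0,
//                      THCState_getCurrentStream(state), inputI, hiddenI, bias1I, bias2I,
//                      cxI, hyI, cyI, hid_size, totalElements);
// call, where Dims = -2 selects the fully-collapsed (contiguous) IndexToOffset specialization.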
template<typename INDTYPE>
void THNN_(LSTM_forw_ind_wrap)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *bias1,
THCTensor *bias2,
THCTensor *cx,
THCTensor *hy,
THCTensor *cy)
{
bool has_bias = (bias1!=NULL);
int maxDim;
if(has_bias){
THCUNN_assertSameGPU(state, 7, input, hidden, bias1, bias2, hy, cy, cx);
maxDim = THNN_(minIndexType)
(state, 7, input, hidden, bias1, bias2, hy, cy, cx);
}else{
THCUNN_assertSameGPU(state, 5, input, hidden, hy, cy, cx);
maxDim = THNN_(minIndexType)
(state, 5, input, hidden, hy, cy, cx);
}
ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, cx);
const dim3 block = getApplyBlock();
dim3 grid;
THAssertMsg(getApplyGrid(state, totalElements, grid),
"Could not get grid size for pointwise apply.");
TensorInfo<DATATYPE, INDTYPE> inputI =
getTensorInfo<THCTensor, INDTYPE>(state, input);
TensorInfo<DATATYPE, INDTYPE> hiddenI =
getTensorInfo<THCTensor, INDTYPE>(state, hidden);
TensorInfo<DATATYPE, INDTYPE> cxI =
getTensorInfo<THCTensor, INDTYPE>(state, cx);
TensorInfo<DATATYPE, INDTYPE> hyI =
getTensorInfo<THCTensor, INDTYPE>(state, hy);
TensorInfo<DATATYPE, INDTYPE> cyI =
getTensorInfo<THCTensor, INDTYPE>(state, cy);
INDTYPE hid_size = cxI.sizes[cxI.dims-1];
if(has_bias){
THAssertMsg( hid_size*4 == THCTensor_(nElement)(state, bias1) &&
hid_size*4 == THCTensor_(nElement)(state, bias2),
"Bias in pointwise operation is an incorrect size, must be 4 x feature size.");
}
if(maxDim == -2){
inputI.collapseDims();
hiddenI.collapseDims();
cxI.collapseDims();
hyI.collapseDims();
cyI.collapseDims();
}
INDTYPE zero[1] = {0};
TensorInfo<DATATYPE, INDTYPE> nullinfo =
TensorInfo<DATATYPE, INDTYPE>(NULL, 1, zero, zero);
TensorInfo<DATATYPE, INDTYPE> bias1I = nullinfo;
TensorInfo<DATATYPE, INDTYPE> bias2I = nullinfo;
if(has_bias){
bias1I = getTensorInfo<THCTensor, INDTYPE>(state, bias1);
bias2I = getTensorInfo<THCTensor, INDTYPE>(state, bias2);
if(maxDim == -2){
bias1I.collapseDims();
bias2I.collapseDims();
}
}
FILL_DIM(INDTYPE, maxDim, LSTM_FORWARD);
}
void THNN_(LSTMFused_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *bias1,
THCTensor *bias2,
THCTensor *cx,
THCTensor *hy,
THCTensor *cy)
{
THCTensor_(resizeAs)(state, hy, cx);
THCTensor_(resizeAs)(state, cy, cx);
THNN_(FusedRNNAssertSizes)(state, 4, 5, input, hidden, hy, cy, cx);
bool has_bias = (bias1!=NULL);
bool canUse32bi;
if(has_bias){
canUse32bi = THNN_(canUse32BitIndexMath)
(state, 7, input, hidden, bias1, bias2, hy, cy, cx);
}else{
canUse32bi = THNN_(canUse32BitIndexMath)
(state, 5, input, hidden, hy, cy, cx);
}
if(canUse32bi){
THNN_(LSTM_forw_ind_wrap)<uint32_t>
(state, input, hidden, bias1, bias2, cx, hy, cy);
}else{
THNN_(LSTM_forw_ind_wrap)<uint64_t>
(state, input, hidden, bias1, bias2, cx, hy, cy);
}
THCudaCheck(hipGetLastError());
}
template<typename INDTYPE>
void THNN_(LSTM_back_ind_wrap)(
THCState *state,
THCTensor *storage,
THCTensor *gradInGates,
THCTensor *cx,
THCTensor *cy,
THCTensor *gradOutput,
THCTensor *gradOutputCell,
THCTensor *gradInputCx)
{
int maxDim = THNN_(minIndexType)
(state, 7, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, gradOutput);
const dim3 block = getApplyBlock();
dim3 grid;
THAssertMsg(getApplyGrid(state, totalElements, grid),
"Could not get grid size for pointwise apply");
TensorInfo<DATATYPE, INDTYPE> storageI =
getTensorInfo<THCTensor, INDTYPE>(state, storage);
TensorInfo<DATATYPE, INDTYPE> gradingatesI =
getTensorInfo<THCTensor, INDTYPE>(state, gradInGates);
TensorInfo<DATATYPE, INDTYPE> cxI =
getTensorInfo<THCTensor, INDTYPE>(state, cx);
TensorInfo<DATATYPE, INDTYPE> cyI =
getTensorInfo<THCTensor, INDTYPE>(state, cy);
TensorInfo<DATATYPE, INDTYPE> gradoutI =
getTensorInfo<THCTensor, INDTYPE>(state, gradOutput);
TensorInfo<DATATYPE, INDTYPE> gradoutcI =
getTensorInfo<THCTensor, INDTYPE>(state, gradOutputCell);
TensorInfo<DATATYPE, INDTYPE> gradincxI =
getTensorInfo<THCTensor, INDTYPE>(state, gradInputCx);
INDTYPE hid_size = gradoutI.sizes[gradoutI.dims-1];
if(maxDim == -2){
storageI.collapseDims();
gradingatesI.collapseDims();
cxI.collapseDims();
cyI.collapseDims();
gradoutI.collapseDims();
gradoutcI.collapseDims();
gradincxI.collapseDims();
}
FILL_DIM(INDTYPE, maxDim, LSTM_BACKWARD);
}
void THNN_(LSTMFused_updateGradInput)(
THCState *state,
THCTensor *storage,
THCTensor *gradInGates,
THCTensor *cx,
THCTensor *cy,
THCTensor *gradOutput,
THCTensor *gradOutputCell,
THCTensor *gradInputCx)
{
THCTensor_(resizeAs)(state, gradInputCx, gradOutput);
THCUNN_assertSameGPU(state, 7, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
THNN_(FusedRNNAssertSizes)
(state, 4, 7, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
bool canUse32bi = THNN_(canUse32BitIndexMath)
(state, 7, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
if(canUse32bi){
THNN_(LSTM_back_ind_wrap)<uint32_t>
(state, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
}else{
THNN_(LSTM_back_ind_wrap)<uint64_t>
(state, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
}
THCudaCheck(hipGetLastError());
}
template<typename INDTYPE>
void THNN_(GRU_forw_ind_wrap)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *bias1,
THCTensor *bias2,
THCTensor *hx,
THCTensor *hy,
THCTensor *storage)
{
bool has_bias = (bias1!=NULL);
int maxDim;
if(has_bias){
THCUNN_assertSameGPU
(state, 7, input, hidden, hx, hy, bias1, bias2, storage);
maxDim = THNN_(minIndexType)
(state, 7, input, hidden, hx, hy, bias1, bias2, storage);
}else{
THCUNN_assertSameGPU
(state, 5, input, hidden, hx, hy, storage);
maxDim = THNN_(minIndexType)
(state, 5, input, hidden, hx, hy, storage);
}
ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, hx);
const dim3 block = getApplyBlock();
dim3 grid;
THAssertMsg(getApplyGrid(state, totalElements, grid),
"Could not get grid size for pointwise apply.");
TensorInfo<DATATYPE, INDTYPE> inputI =
getTensorInfo<THCTensor, INDTYPE>(state, input);
TensorInfo<DATATYPE, INDTYPE> hiddenI =
getTensorInfo<THCTensor, INDTYPE>(state, hidden);
TensorInfo<DATATYPE, INDTYPE> hxI =
getTensorInfo<THCTensor, INDTYPE>(state, hx);
TensorInfo<DATATYPE, INDTYPE> hyI =
getTensorInfo<THCTensor, INDTYPE>(state, hy);
TensorInfo<DATATYPE, INDTYPE> storageI =
getTensorInfo<THCTensor, INDTYPE>(state, storage);
INDTYPE hid_size = hxI.sizes[hxI.dims-1];
if(has_bias){
THAssertMsg( hid_size*3 == THCTensor_(nElement)(state, bias1) &&
hid_size*3 == THCTensor_(nElement)(state, bias2),
"Bias in pointwise operation is an incorrect size, must be 3 x feature size.");
}
if(maxDim == -2){
inputI.collapseDims();
hiddenI.collapseDims();
hyI.collapseDims();
hxI.collapseDims();
storageI.collapseDims();
}
INDTYPE zero[1] = {0};
TensorInfo<DATATYPE, INDTYPE> nullinfo =
TensorInfo<DATATYPE, INDTYPE>(NULL, 1, zero, zero);
TensorInfo<DATATYPE, INDTYPE> bias1I = nullinfo;
TensorInfo<DATATYPE, INDTYPE> bias2I = nullinfo;
if(has_bias){
bias1I = getTensorInfo<THCTensor, INDTYPE>(state, bias1);
bias2I = getTensorInfo<THCTensor, INDTYPE>(state, bias2);
if(maxDim == -2){
bias1I.collapseDims();
bias2I.collapseDims();
}
}
FILL_DIM(INDTYPE, maxDim, GRU_FORWARD);
}
void THNN_(GRUFused_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *bias1,
THCTensor *bias2,
THCTensor *hx,
THCTensor *hy,
THCTensor *storage)
{
THCTensor_(resizeAs)(state, hy, hx);
THNN_(FusedRNNAssertSizes)(state, 3, 4, input, hidden, hx, hy);
THArgCheck(THCTensor_(nElement)(state, storage) ==
THCTensor_(nElement)(state, hx)*5,
3, "Storage tensor for fused kernel was not sized correctly.");
bool has_bias = (bias1!=NULL);
bool canUse32bi;
if(has_bias){
canUse32bi = THNN_(canUse32BitIndexMath)
(state, 7, input, hidden, hx, hy, bias1, bias2, storage);
}else{
canUse32bi = THNN_(canUse32BitIndexMath)
(state, 5, input, hidden, hx, hy, storage);
}
if(canUse32bi){
THNN_(GRU_forw_ind_wrap)<uint32_t>
(state, input, hidden, bias1, bias2, hx, hy, storage);
}else{
THNN_(GRU_forw_ind_wrap)<uint64_t>
(state, input, hidden, bias1, bias2, hx, hy, storage);
}
THCudaCheck(hipGetLastError());
}
template<typename INDTYPE>
void THNN_(GRU_back_ind_wrap)(
THCState *state,
THCTensor *gradInInput,
THCTensor *gradInHidden,
THCTensor *gradOutput,
THCTensor *gradInputHx,
THCTensor *storage)
{
int maxDim = THNN_(minIndexType)(state, 5, gradInInput, gradInHidden, gradOutput,
gradInputHx, storage);
ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, gradOutput);
const dim3 block = getApplyBlock();
dim3 grid;
THAssertMsg(getApplyGrid(state, totalElements, grid),
"Could not get grid size for pointwise apply");
TensorInfo<DATATYPE, INDTYPE> gradininputI =
getTensorInfo<THCTensor, INDTYPE>(state, gradInInput);
TensorInfo<DATATYPE, INDTYPE> gradinhiddenI =
getTensorInfo<THCTensor, INDTYPE>(state, gradInHidden);
TensorInfo<DATATYPE, INDTYPE> gradoutI =
getTensorInfo<THCTensor, INDTYPE>(state, gradOutput);
TensorInfo<DATATYPE, INDTYPE> gradinhxI =
getTensorInfo<THCTensor, INDTYPE>(state, gradInputHx);
TensorInfo<DATATYPE, INDTYPE> storageI =
getTensorInfo<THCTensor, INDTYPE>(state, storage);
INDTYPE hid_size = gradoutI.sizes[gradoutI.dims-1];
if(maxDim == -2){
gradininputI.collapseDims();
gradinhiddenI.collapseDims();
gradoutI.collapseDims();
gradinhxI.collapseDims();
storageI.collapseDims();
}
FILL_DIM(INDTYPE, maxDim, GRU_BACKWARD);
}
void THNN_(GRUFused_updateGradInput)(
THCState *state,
THCTensor *gradInInput,
THCTensor *gradInHidden,
THCTensor *gradOutput,
THCTensor *gradInputHx,
THCTensor *storage)
{
THCTensor_(resizeAs)(state, gradInputHx, gradOutput);
THCUNN_assertSameGPU(state, 5, gradInInput, gradInHidden, gradOutput, gradInputHx, storage);
THNN_(FusedRNNAssertSizes)(state, 3, 4, gradInInput, gradInHidden, gradOutput, gradInputHx);
bool canUse32bi = THNN_(canUse32BitIndexMath)(state, 5, gradInInput, gradInHidden,
gradOutput, gradInputHx, storage);
if(canUse32bi){
THNN_(GRU_back_ind_wrap)<uint32_t>
(state, gradInInput, gradInHidden, gradOutput, gradInputHx, storage);
}else{
THNN_(GRU_back_ind_wrap)<uint64_t>
(state, gradInInput, gradInHidden, gradOutput, gradInputHx, storage);
}
THCudaCheck(hipGetLastError());
}
//Clean up compiler namespace
#undef DEVICE_LINEAR_GET
#undef H2F
#undef F2H
#undef EXPAND_FUNCTION
#undef EXPAND_DIM
#undef EXPAND_TYPE
#undef FILL_TYPES_FORWARD
#undef FILL_FORWARD
#undef FILL_TYPES_BACKWARD
#undef FILL_BACKWARD
#endif
| a6fe85a97eb0eeefb28567112451834ec160088e.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/FusedRNNKernel.cu"
#else
#include <cstdarg>
#include "../common.h"
#define DATATYPE TensorUtils<THCTensor>::DataType
//factor will be 3 for GRU and 4 for LSTM
void THNN_(FusedRNNAssertSizes)(THCState *state, int factor, int count, ...)
{
va_list list;
va_start(list, count);
THCTensor *input = va_arg(list, THCTensor*);
THCTensor *hidden = va_arg(list, THCTensor*);
THArgCheck(THCTensor_(nElement)(state, input) ==
THCTensor_(nElement)(state, hidden),
3, "Input and Hidden tensor sizes should be the same.");
THAssertMsg(TensorUtils<THCTensor>::getDims(state, input) <= MAX_CUTORCH_DIMS,
"Tensor dimension is too large.");
THAssertMsg(TensorUtils<THCTensor>::getDims(state, hidden) <= MAX_CUTORCH_DIMS,
"Tensor dimension is too large.");
for (int arg=2; arg < count; ++arg){
THCTensor *tens = va_arg(list, THCTensor*);
THArgCheck(THCTensor_(nElement)(state, input) ==
THCTensor_(nElement)(state, tens)*factor,
3, "A pointwise tensor was not the right size, should have 1/%u the elements of input/hidden tensor.", arg, factor);
THAssertMsg(TensorUtils<THCTensor>::getDims(state, tens) <= MAX_CUTORCH_DIMS,
"Tensor dimension is too large.");
}
va_end(list);
}
int THNN_(minIndexType)(THCState *state, int count, ...)
{
va_list list;
va_start(list, count);
THCTensor* tens = va_arg(list, THCTensor*);
int startDim = TensorUtils<THCTensor>::getDims(state, tens);
bool canCollapse = THCTensor_(isContiguous)(state,tens);
for (int arg=1; arg < count; ++arg){
tens = va_arg(list, THCTensor*);
canCollapse = canCollapse && THCTensor_(isContiguous)(state, tens);
if(TensorUtils<THCTensor>::getDims(state, tens) != startDim){
va_end(list);
return -1;
}
}
va_end(list);
if(canCollapse) return -2;
return startDim;
}
bool THNN_(canUse32BitIndexMath)(THCState *state, int count, ...)
{
va_list list;
va_start(list, count);
for (int arg=0; arg < count; ++arg){
THCTensor *tens = va_arg(list, THCTensor*);
if (!TensorUtils<THCTensor>::canUse32BitIndexMath(state, tens)){
va_end(list);
return false;
}
}
va_end(list);
return true;
}
#define DEVICE_LINEAR_GET(D_TENSOR, INDEX) \
D_TENSOR.data[IndexToOffset<T, IndexType, Dims>::get(INDEX, D_TENSOR)]
#define H2F(input) __half2float(input)
#define F2H(input) __float2half(input)
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(GRUForward)(TensorInfo<T, IndexType> Input,
TensorInfo<T, IndexType> Hidden,
TensorInfo<T, IndexType> Bias1,
TensorInfo<T, IndexType> Bias2,
TensorInfo<T, IndexType> _hx,
TensorInfo<T, IndexType> _hy,
TensorInfo<T, IndexType> storage,
IndexType hsz,
IndexType totalElements)
{
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x)
{
IndexType offset = (linearIndex/hsz)*3*hsz+linearIndex%hsz;
T ir = DEVICE_LINEAR_GET(Input, offset+0*hsz);
T ii = DEVICE_LINEAR_GET(Input, offset+1*hsz);
T in = DEVICE_LINEAR_GET(Input, offset+2*hsz);
T hr = DEVICE_LINEAR_GET(Hidden,offset+0*hsz);
T hi = DEVICE_LINEAR_GET(Hidden,offset+1*hsz);
T hn = DEVICE_LINEAR_GET(Hidden, offset+2*hsz);
T hx = DEVICE_LINEAR_GET(_hx, linearIndex);
T* hy = &DEVICE_LINEAR_GET(_hy, linearIndex);
bool has_bias = (Bias1.data != NULL);
T b1r, b1i, b1n, b2r, b2i, b2n;
if(has_bias){
b1r = DEVICE_LINEAR_GET(Bias1, linearIndex%hsz+0*hsz);
b1i = DEVICE_LINEAR_GET(Bias1, linearIndex%hsz+1*hsz);
b1n = DEVICE_LINEAR_GET(Bias1, linearIndex%hsz+2*hsz);
b2r = DEVICE_LINEAR_GET(Bias2, linearIndex%hsz+0*hsz);
b2i = DEVICE_LINEAR_GET(Bias2, linearIndex%hsz+1*hsz);
b2n = DEVICE_LINEAR_GET(Bias2, linearIndex%hsz+2*hsz);
}else{
#ifndef THC_REAL_IS_HALF
b1r = 0.0; b1i = 0.0; b1n = 0.0;
b2r = 0.0; b2i = 0.0; b2n = 0.0;
#else
b1r = F2H(0.0); b1i = F2H(0.0); b1n = F2H(0.0);
b2r = F2H(0.0); b2i = F2H(0.0); b2n = F2H(0.0);
#endif
}
offset = (linearIndex/hsz)*5*hsz+linearIndex%hsz;
#ifndef THC_REAL_IS_HALF
T rg, ig, ng;
rg = ir + hr + b1r + b2r;
ig = ii + hi + b1i + b2i;
TensorSigmoidOp<real>()(&rg, &rg);
TensorSigmoidOp<real>()(&ig, &ig);
ng = in + b1n + rg * (hn + b2n);
ng = THCNumerics<T>::tanh(ng);
*hy = ng + ig * (hx - ng);
//SAVE FOR BACKWARDS
DEVICE_LINEAR_GET(storage, offset+0*hsz) = rg;
DEVICE_LINEAR_GET(storage, offset+1*hsz) = ig;
DEVICE_LINEAR_GET(storage, offset+2*hsz) = ng;
DEVICE_LINEAR_GET(storage, offset+3*hsz) = hx;
DEVICE_LINEAR_GET(storage, offset+4*hsz) = hn + b2n;
#else
float rg, ig, ng;
rg = H2F(ir) + H2F(hr) + H2F(b1r) + H2F(b2r);
ig = H2F(ii) + H2F(hi) + H2F(b1i) + H2F(b2i);
TensorSigmoidOp<float>()(&rg, &rg);
TensorSigmoidOp<float>()(&ig, &ig);
ng = H2F(in) + H2F(b1n) + rg*( H2F(hn)+H2F(b2n) );
ng = THCNumerics<float>::tanh(ng);
*hy = F2H( ng + ig * ( H2F(hx)-ng ) );
//SAVE FOR BACKWARDS
DEVICE_LINEAR_GET(storage, offset+0*hsz) = F2H(rg);
DEVICE_LINEAR_GET(storage, offset+1*hsz) = F2H(ig);
DEVICE_LINEAR_GET(storage, offset+2*hsz) = F2H(ng);
DEVICE_LINEAR_GET(storage, offset+3*hsz) = hx;
DEVICE_LINEAR_GET(storage, offset+4*hsz) = F2H(H2F(hn) + H2F(b2n));
#endif
}
}
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(GRUBackward)(TensorInfo<T, IndexType> gradInInput,
TensorInfo<T, IndexType> gradInHidden,
TensorInfo<T, IndexType> gradOutput,
TensorInfo<T, IndexType> gradInputHx,
TensorInfo<T, IndexType> storage,
IndexType hsz,
IndexType totalElements)
{
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x) {
IndexType offset = (linearIndex/hsz)*5*hsz+linearIndex%hsz;
T rg = DEVICE_LINEAR_GET(storage, offset+0*hsz);
T ig = DEVICE_LINEAR_GET(storage, offset+1*hsz);
T ng = DEVICE_LINEAR_GET(storage, offset+2*hsz);
T hx = DEVICE_LINEAR_GET(storage, offset+3*hsz);
T hn = DEVICE_LINEAR_GET(storage, offset+4*hsz);
T go = DEVICE_LINEAR_GET(gradOutput, linearIndex);
offset = (linearIndex/hsz)*3*hsz+linearIndex%hsz;
#ifndef THC_REAL_IS_HALF
T gig = go*(hx-ng)*(1-ig)*(ig);
T ghx = go*(ig);
T gin = go*(1-ig)*(1-ng*ng);
T ghn = gin *rg;
T grg = gin*hn*(1-rg)*rg;
DEVICE_LINEAR_GET(gradInputHx, linearIndex) = ghx;
DEVICE_LINEAR_GET(gradInInput, offset+0*hsz) = grg;
DEVICE_LINEAR_GET(gradInInput, offset+1*hsz) = gig;
DEVICE_LINEAR_GET(gradInInput, offset+2*hsz) = gin;
DEVICE_LINEAR_GET(gradInHidden, offset+0*hsz) = grg;
DEVICE_LINEAR_GET(gradInHidden, offset+1*hsz) = gig;
DEVICE_LINEAR_GET(gradInHidden, offset+2*hsz) = ghn;
#else
float gig = H2F(go)*( H2F(hx)-H2F(ng) )*( 1-H2F(ig) )*H2F(ig);
float ghx = H2F(go)*H2F(ig);
float gin = H2F(go)*( 1-H2F(ig) )*( 1-H2F(ng)*H2F(ng) );
float ghn = H2F(gin) * H2F(rg);
float grg = H2F(gin)*H2F(hn)*( 1-H2F(rg) )*H2F(rg);
DEVICE_LINEAR_GET(gradInInput, offset+0*hsz) = F2H(grg);
DEVICE_LINEAR_GET(gradInInput, offset+1*hsz) = F2H(gig);
DEVICE_LINEAR_GET(gradInInput, offset+2*hsz) = F2H(gin);
DEVICE_LINEAR_GET(gradInHidden, offset+0*hsz) = F2H(grg);
DEVICE_LINEAR_GET(gradInHidden, offset+1*hsz) = F2H(gig);
DEVICE_LINEAR_GET(gradInHidden, offset+2*hsz) = F2H(ghn);
DEVICE_LINEAR_GET(gradInputHx, linearIndex) = F2H(ghx);
#endif
}
}
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(LSTMForward)(TensorInfo<T, IndexType> input,
TensorInfo<T, IndexType> hidden,
TensorInfo<T, IndexType> bias1,
TensorInfo<T, IndexType> bias2,
TensorInfo<T, IndexType> _cx,
TensorInfo<T, IndexType> _hy,
TensorInfo<T, IndexType> _cy,
IndexType hsz,
IndexType totalElements)
{
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x)
{
IndexType offset = (linearIndex/hsz)*4*hsz+linearIndex%hsz;
T* iig = &DEVICE_LINEAR_GET(input, offset+0*hsz);
T* ifg = &DEVICE_LINEAR_GET(input, offset+1*hsz);
T* icg = &DEVICE_LINEAR_GET(input, offset+2*hsz);
T* iog = &DEVICE_LINEAR_GET(input, offset+3*hsz);
T hig = DEVICE_LINEAR_GET(hidden, offset+0*hsz);
T hfg = DEVICE_LINEAR_GET(hidden, offset+1*hsz);
T hcg = DEVICE_LINEAR_GET(hidden, offset+2*hsz);
T hog = DEVICE_LINEAR_GET(hidden, offset+3*hsz);
T cx = DEVICE_LINEAR_GET(_cx, linearIndex);
T* hy = &DEVICE_LINEAR_GET(_hy, linearIndex);
T* cy = &DEVICE_LINEAR_GET(_cy, linearIndex);
bool has_bias = (bias1.data != NULL);
T b1i, b1f, b1c, b1o;
T b2i, b2f, b2c, b2o;
if(has_bias){
b1i = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+0*hsz);
b1f = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+1*hsz);
b1c = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+2*hsz);
b1o = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+3*hsz);
b2i = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+0*hsz);
b2f = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+1*hsz);
b2c = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+2*hsz);
b2o = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+3*hsz);
}else{
#ifndef THC_REAL_IS_HALF
b1i = 0.0; b1f = 0.0; b1c = 0.0; b1o = 0.0;
b2i = 0.0; b2f = 0.0; b2c = 0.0; b2o = 0.0;
#else
b1i = F2H(0.0); b1f = F2H(0.0); b1c = F2H(0.0); b1o = F2H(0.0);
b2i = F2H(0.0); b2f = F2H(0.0); b2c = F2H(0.0); b2o = F2H(0.0);
#endif
}
#ifndef THC_REAL_IS_HALF
T ig, fg, cg, og;
ig = *iig + hig + b1i + b2i;
fg = *ifg + hfg + b1f + b2f;
cg = *icg + hcg + b1c + b2c;
og = *iog + hog + b1o + b2o;
TensorSigmoidOp<real>()(&ig, &ig);
TensorSigmoidOp<real>()(&fg, &fg);
cg = THCNumerics<T>::tanh(cg);
TensorSigmoidOp<real>()(&og, &og);
*cy = (fg * cx) + (ig * cg);
*hy = og * THCNumerics<T>::tanh(*cy);
*iig = ig;
*ifg = fg;
*icg = cg;
*iog = og;
#else
float ig, fg, cg, og;
float f_hy, f_cy;
ig = H2F(*iig) + H2F(hig) + H2F(b1i) + H2F(b2i);
fg = H2F(*ifg) + H2F(hfg) + H2F(b1f) + H2F(b2f);
cg = H2F(*icg) + H2F(hcg) + H2F(b1c) + H2F(b2c);
og = H2F(*iog) + H2F(hog) + H2F(b1o) + H2F(b2o);
TensorSigmoidOp<float>()(&ig, &ig);
TensorSigmoidOp<float>()(&fg, &fg);
cg = THCNumerics<float>::tanh(cg);
TensorSigmoidOp<float>()(&og, &og);
f_cy = (fg * H2F(cx) ) + (ig * cg);
f_hy = og * THCNumerics<float>::tanh(f_cy);
*hy = F2H(f_hy);
*cy = F2H(f_cy);
//SAVE FOR BACKWARDS
//Also need cy and cx but can be saved easily in python
*iig = F2H(ig);
*ifg = F2H(fg);
*icg = F2H(cg);
*iog = F2H(og);
#endif
}
}
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(LSTMBackward)(TensorInfo<T, IndexType> storage,
TensorInfo<T, IndexType> gradInGates,
TensorInfo<T, IndexType> _cx,
TensorInfo<T, IndexType> _cy,
TensorInfo<T, IndexType> gradoutput,
TensorInfo<T, IndexType> gradoutputcell,
TensorInfo<T, IndexType> gradInputCx,
IndexType hsz,
IndexType totalElements)
{
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x) {
IndexType offset = (linearIndex/hsz)*4*hsz+linearIndex%hsz;
T ig = DEVICE_LINEAR_GET(storage, offset+0*hsz);
T fg = DEVICE_LINEAR_GET(storage, offset+1*hsz);
T cg = DEVICE_LINEAR_GET(storage, offset+2*hsz);
T og = DEVICE_LINEAR_GET(storage, offset+3*hsz);
T* ih = &DEVICE_LINEAR_GET(gradInGates, offset+0*hsz);
T* fh = &DEVICE_LINEAR_GET(gradInGates, offset+1*hsz);
T* ch = &DEVICE_LINEAR_GET(gradInGates, offset+2*hsz);
T* oh = &DEVICE_LINEAR_GET(gradInGates, offset+3*hsz);
//will return hidden grads here
T cx = DEVICE_LINEAR_GET(_cx, linearIndex);
T cy = DEVICE_LINEAR_GET(_cy, linearIndex);
T* gi = &DEVICE_LINEAR_GET(gradInputCx, linearIndex);
T go = DEVICE_LINEAR_GET(gradoutput, linearIndex);
T goc= DEVICE_LINEAR_GET(gradoutputcell, linearIndex);
#ifndef THC_REAL_IS_HALF
T gcx = THCNumerics<T>::tanh(cy);
T gog = go * gcx;
gcx = go * og * ( 1 - gcx*gcx) + goc;
T gig = gcx * cg;
T gfg = gcx * cx;
T gcg = gcx * ig;
gcx = gcx * fg;
gig = gig * (1-ig) * ig;
gfg = gfg * (1-fg) * fg;
gcg = gcg * (1-cg*cg);
gog = gog * (1-og) * og;
*ih = gig;
*fh = gfg;
*ch = gcg;
*oh = gog;
*gi = gcx;
#else
float gcx = THCNumerics<float>::tanh(H2F(cy));
float gog = H2F(go) * gcx;
gcx = H2F(go) * H2F(og) * ( 1 - gcx*gcx) + H2F(goc);
float gcg = gcx * H2F(ig);
float gfg = gcx * H2F(cx);
float gig = gcx * H2F(cg);
gcx = gcx * H2F(fg);
gog = gog * ( (1-H2F(og))*H2F(og) );
gcg = gcg * (1-H2F(cg)*H2F(cg));
gfg = gfg * ( (1-H2F(fg))*H2F(fg) );
gig = gig * ( (1-H2F(ig))*H2F(ig) );
*ih = F2H(gig);
*fh = F2H(gfg);
*ch = F2H(gcg);
*oh = F2H(gog);
*gi = F2H(gcx);
#endif
}
}
// ************ START Create function calls ********** //
#define FILL_FUNCTION(ITYPE, DIM, FUNCTION) FUNCTION(ITYPE, DIM)
#define FILL_DIM(ITYPE, DIM, FUNCTION) \
switch (DIM) { \
case -2: \
FILL_FUNCTION(ITYPE, -2, FUNCTION); \
break; \
case 1: \
FILL_FUNCTION(ITYPE, 1, FUNCTION); \
break; \
case 2: \
FILL_FUNCTION(ITYPE, 2, FUNCTION); \
break; \
default: \
FILL_FUNCTION(ITYPE, -1, FUNCTION); \
break; \
}
#define LSTM_FORWARD(ITYPE, DIM) THNN_(LSTMForward) \
<DATATYPE, ITYPE, DIM> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>> \
(inputI, hiddenI, \
bias1I, bias2I, cxI, hyI, cyI, \
hid_size, totalElements);
#define LSTM_BACKWARD(ITYPE, DIM) THNN_(LSTMBackward) \
<DATATYPE, ITYPE, DIM> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>> \
(storageI, gradingatesI, cxI, cyI, \
gradoutI, gradoutcI, gradincxI, \
hid_size, totalElements);
#define GRU_FORWARD(ITYPE, DIM) THNN_(GRUForward)<DATATYPE, ITYPE, DIM> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>> \
(inputI, hiddenI, bias1I, bias2I, hxI, hyI, storageI, \
hid_size, totalElements);
#define GRU_BACKWARD(ITYPE, DIM) THNN_(GRUBackward) \
<DATATYPE, ITYPE, DIM> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>> \
(gradininputI, gradinhiddenI, gradoutI, gradinhxI, storageI, \
hid_size, totalElements);
// ************ END Create actual function calls ************ //
template<typename INDTYPE>
void THNN_(LSTM_forw_ind_wrap)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *bias1,
THCTensor *bias2,
THCTensor *cx,
THCTensor *hy,
THCTensor *cy)
{
bool has_bias = (bias1!=NULL);
int maxDim;
if(has_bias){
THCUNN_assertSameGPU(state, 7, input, hidden, bias1, bias2, hy, cy, cx);
maxDim = THNN_(minIndexType)
(state, 7, input, hidden, bias1, bias2, hy, cy, cx);
}else{
THCUNN_assertSameGPU(state, 5, input, hidden, hy, cy, cx);
maxDim = THNN_(minIndexType)
(state, 5, input, hidden, hy, cy, cx);
}
ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, cx);
const dim3 block = getApplyBlock();
dim3 grid;
THAssertMsg(getApplyGrid(state, totalElements, grid),
"Could not get grid size for pointwise apply.");
TensorInfo<DATATYPE, INDTYPE> inputI =
getTensorInfo<THCTensor, INDTYPE>(state, input);
TensorInfo<DATATYPE, INDTYPE> hiddenI =
getTensorInfo<THCTensor, INDTYPE>(state, hidden);
TensorInfo<DATATYPE, INDTYPE> cxI =
getTensorInfo<THCTensor, INDTYPE>(state, cx);
TensorInfo<DATATYPE, INDTYPE> hyI =
getTensorInfo<THCTensor, INDTYPE>(state, hy);
TensorInfo<DATATYPE, INDTYPE> cyI =
getTensorInfo<THCTensor, INDTYPE>(state, cy);
INDTYPE hid_size = cxI.sizes[cxI.dims-1];
if(has_bias){
THAssertMsg( hid_size*4 == THCTensor_(nElement)(state, bias1) &&
hid_size*4 == THCTensor_(nElement)(state, bias2),
"Bias in pointwise operation is an incorrect size, must be 4 x feature size.");
}
if(maxDim == -2){
inputI.collapseDims();
hiddenI.collapseDims();
cxI.collapseDims();
hyI.collapseDims();
cyI.collapseDims();
}
INDTYPE zero[1] = {0};
TensorInfo<DATATYPE, INDTYPE> nullinfo =
TensorInfo<DATATYPE, INDTYPE>(NULL, 1, zero, zero);
TensorInfo<DATATYPE, INDTYPE> bias1I = nullinfo;
TensorInfo<DATATYPE, INDTYPE> bias2I = nullinfo;
if(has_bias){
bias1I = getTensorInfo<THCTensor, INDTYPE>(state, bias1);
bias2I = getTensorInfo<THCTensor, INDTYPE>(state, bias2);
if(maxDim == -2){
bias1I.collapseDims();
bias2I.collapseDims();
}
}
FILL_DIM(INDTYPE, maxDim, LSTM_FORWARD);
}
void THNN_(LSTMFused_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *bias1,
THCTensor *bias2,
THCTensor *cx,
THCTensor *hy,
THCTensor *cy)
{
THCTensor_(resizeAs)(state, hy, cx);
THCTensor_(resizeAs)(state, cy, cx);
THNN_(FusedRNNAssertSizes)(state, 4, 5, input, hidden, hy, cy, cx);
bool has_bias = (bias1!=NULL);
bool canUse32bi;
if(has_bias){
canUse32bi = THNN_(canUse32BitIndexMath)
(state, 7, input, hidden, bias1, bias2, hy, cy, cx);
}else{
canUse32bi = THNN_(canUse32BitIndexMath)
(state, 5, input, hidden, hy, cy, cx);
}
if(canUse32bi){
THNN_(LSTM_forw_ind_wrap)<uint32_t>
(state, input, hidden, bias1, bias2, cx, hy, cy);
}else{
THNN_(LSTM_forw_ind_wrap)<uint64_t>
(state, input, hidden, bias1, bias2, cx, hy, cy);
}
THCudaCheck(cudaGetLastError());
}
template<typename INDTYPE>
void THNN_(LSTM_back_ind_wrap)(
THCState *state,
THCTensor *storage,
THCTensor *gradInGates,
THCTensor *cx,
THCTensor *cy,
THCTensor *gradOutput,
THCTensor *gradOutputCell,
THCTensor *gradInputCx)
{
int maxDim = THNN_(minIndexType)
(state, 7, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, gradOutput);
const dim3 block = getApplyBlock();
dim3 grid;
THAssertMsg(getApplyGrid(state, totalElements, grid),
"Could not get grid size for pointwise apply");
TensorInfo<DATATYPE, INDTYPE> storageI =
getTensorInfo<THCTensor, INDTYPE>(state, storage);
TensorInfo<DATATYPE, INDTYPE> gradingatesI =
getTensorInfo<THCTensor, INDTYPE>(state, gradInGates);
TensorInfo<DATATYPE, INDTYPE> cxI =
getTensorInfo<THCTensor, INDTYPE>(state, cx);
TensorInfo<DATATYPE, INDTYPE> cyI =
getTensorInfo<THCTensor, INDTYPE>(state, cy);
TensorInfo<DATATYPE, INDTYPE> gradoutI =
getTensorInfo<THCTensor, INDTYPE>(state, gradOutput);
TensorInfo<DATATYPE, INDTYPE> gradoutcI =
getTensorInfo<THCTensor, INDTYPE>(state, gradOutputCell);
TensorInfo<DATATYPE, INDTYPE> gradincxI =
getTensorInfo<THCTensor, INDTYPE>(state, gradInputCx);
INDTYPE hid_size = gradoutI.sizes[gradoutI.dims-1];
if(maxDim == -2){
storageI.collapseDims();
gradingatesI.collapseDims();
cxI.collapseDims();
cyI.collapseDims();
gradoutI.collapseDims();
gradoutcI.collapseDims();
gradincxI.collapseDims();
}
FILL_DIM(INDTYPE, maxDim, LSTM_BACKWARD);
}
void THNN_(LSTMFused_updateGradInput)(
THCState *state,
THCTensor *storage,
THCTensor *gradInGates,
THCTensor *cx,
THCTensor *cy,
THCTensor *gradOutput,
THCTensor *gradOutputCell,
THCTensor *gradInputCx)
{
THCTensor_(resizeAs)(state, gradInputCx, gradOutput);
THCUNN_assertSameGPU(state, 7, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
THNN_(FusedRNNAssertSizes)
(state, 4, 7, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
bool canUse32bi = THNN_(canUse32BitIndexMath)
(state, 7, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
if(canUse32bi){
THNN_(LSTM_back_ind_wrap)<uint32_t>
(state, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
}else{
THNN_(LSTM_back_ind_wrap)<uint64_t>
(state, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
}
THCudaCheck(cudaGetLastError());
}
template<typename INDTYPE>
void THNN_(GRU_forw_ind_wrap)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *bias1,
THCTensor *bias2,
THCTensor *hx,
THCTensor *hy,
THCTensor *storage)
{
bool has_bias = (bias1!=NULL);
int maxDim;
if(has_bias){
THCUNN_assertSameGPU
(state, 7, input, hidden, hx, hy, bias1, bias2, storage);
maxDim = THNN_(minIndexType)
(state, 7, input, hidden, hx, hy, bias1, bias2, storage);
}else{
THCUNN_assertSameGPU
(state, 5, input, hidden, hx, hy, storage);
maxDim = THNN_(minIndexType)
(state, 5, input, hidden, hx, hy, storage);
}
ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, hx);
const dim3 block = getApplyBlock();
dim3 grid;
THAssertMsg(getApplyGrid(state, totalElements, grid),
"Could not get grid size for pointwise apply.");
TensorInfo<DATATYPE, INDTYPE> inputI =
getTensorInfo<THCTensor, INDTYPE>(state, input);
TensorInfo<DATATYPE, INDTYPE> hiddenI =
getTensorInfo<THCTensor, INDTYPE>(state, hidden);
TensorInfo<DATATYPE, INDTYPE> hxI =
getTensorInfo<THCTensor, INDTYPE>(state, hx);
TensorInfo<DATATYPE, INDTYPE> hyI =
getTensorInfo<THCTensor, INDTYPE>(state, hy);
TensorInfo<DATATYPE, INDTYPE> storageI =
getTensorInfo<THCTensor, INDTYPE>(state, storage);
INDTYPE hid_size = hxI.sizes[hxI.dims-1];
if(has_bias){
THAssertMsg( hid_size*3 == THCTensor_(nElement)(state, bias1) &&
hid_size*3 == THCTensor_(nElement)(state, bias2),
"Bias in pointwise operation is an incorrect size, must be 3 x feature size.");
}
if(maxDim == -2){
inputI.collapseDims();
hiddenI.collapseDims();
hyI.collapseDims();
hxI.collapseDims();
storageI.collapseDims();
}
INDTYPE zero[1] = {0};
TensorInfo<DATATYPE, INDTYPE> nullinfo =
TensorInfo<DATATYPE, INDTYPE>(NULL, 1, zero, zero);
TensorInfo<DATATYPE, INDTYPE> bias1I = nullinfo;
TensorInfo<DATATYPE, INDTYPE> bias2I = nullinfo;
if(has_bias){
bias1I = getTensorInfo<THCTensor, INDTYPE>(state, bias1);
bias2I = getTensorInfo<THCTensor, INDTYPE>(state, bias2);
if(maxDim == -2){
bias1I.collapseDims();
bias2I.collapseDims();
}
}
FILL_DIM(INDTYPE, maxDim, GRU_FORWARD);
}
void THNN_(GRUFused_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *bias1,
THCTensor *bias2,
THCTensor *hx,
THCTensor *hy,
THCTensor *storage)
{
THCTensor_(resizeAs)(state, hy, hx);
THNN_(FusedRNNAssertSizes)(state, 3, 4, input, hidden, hx, hy);
THArgCheck(THCTensor_(nElement)(state, storage) ==
THCTensor_(nElement)(state, hx)*5,
3, "Storage tensor for fused kernel was not sized correctly.");
bool has_bias = (bias1!=NULL);
bool canUse32bi;
if(has_bias){
canUse32bi = THNN_(canUse32BitIndexMath)
(state, 7, input, hidden, hx, hy, bias1, bias2, storage);
}else{
canUse32bi = THNN_(canUse32BitIndexMath)
(state, 5, input, hidden, hx, hy, storage);
}
if(canUse32bi){
THNN_(GRU_forw_ind_wrap)<uint32_t>
(state, input, hidden, bias1, bias2, hx, hy, storage);
}else{
THNN_(GRU_forw_ind_wrap)<uint64_t>
(state, input, hidden, bias1, bias2, hx, hy, storage);
}
THCudaCheck(cudaGetLastError());
}
template<typename INDTYPE>
void THNN_(GRU_back_ind_wrap)(
THCState *state,
THCTensor *gradInInput,
THCTensor *gradInHidden,
THCTensor *gradOutput,
THCTensor *gradInputHx,
THCTensor *storage)
{
int maxDim = THNN_(minIndexType)(state, 5, gradInInput, gradInHidden, gradOutput,
gradInputHx, storage);
ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, gradOutput);
const dim3 block = getApplyBlock();
dim3 grid;
THAssertMsg(getApplyGrid(state, totalElements, grid),
"Could not get grid size for pointwise apply");
TensorInfo<DATATYPE, INDTYPE> gradininputI =
getTensorInfo<THCTensor, INDTYPE>(state, gradInInput);
TensorInfo<DATATYPE, INDTYPE> gradinhiddenI =
getTensorInfo<THCTensor, INDTYPE>(state, gradInHidden);
TensorInfo<DATATYPE, INDTYPE> gradoutI =
getTensorInfo<THCTensor, INDTYPE>(state, gradOutput);
TensorInfo<DATATYPE, INDTYPE> gradinhxI =
getTensorInfo<THCTensor, INDTYPE>(state, gradInputHx);
TensorInfo<DATATYPE, INDTYPE> storageI =
getTensorInfo<THCTensor, INDTYPE>(state, storage);
INDTYPE hid_size = gradoutI.sizes[gradoutI.dims-1];
if(maxDim == -2){
gradininputI.collapseDims();
gradinhiddenI.collapseDims();
gradoutI.collapseDims();
gradinhxI.collapseDims();
storageI.collapseDims();
}
FILL_DIM(INDTYPE, maxDim, GRU_BACKWARD);
}
void THNN_(GRUFused_updateGradInput)(
THCState *state,
THCTensor *gradInInput,
THCTensor *gradInHidden,
THCTensor *gradOutput,
THCTensor *gradInputHx,
THCTensor *storage)
{
THCTensor_(resizeAs)(state, gradInputHx, gradOutput);
THCUNN_assertSameGPU(state, 5, gradInInput, gradInHidden, gradOutput, gradInputHx, storage);
THNN_(FusedRNNAssertSizes)(state, 3, 4, gradInInput, gradInHidden, gradOutput, gradInputHx);
bool canUse32bi = THNN_(canUse32BitIndexMath)(state, 5, gradInInput, gradInHidden,
gradOutput, gradInputHx, storage);
if(canUse32bi){
THNN_(GRU_back_ind_wrap)<uint32_t>
(state, gradInInput, gradInHidden, gradOutput, gradInputHx, storage);
}else{
THNN_(GRU_back_ind_wrap)<uint64_t>
(state, gradInInput, gradInHidden, gradOutput, gradInputHx, storage);
}
THCudaCheck(cudaGetLastError());
}
//Clean up compiler namespace
#undef DEVICE_LINEAR_GET
#undef H2F
#undef F2H
#undef EXPAND_FUNCTION
#undef EXPAND_DIM
#undef EXPAND_TYPE
#undef FILL_TYPES_FORWARD
#undef FILL_FORWARD
#undef FILL_TYPES_BACKWARD
#undef FILL_BACKWARD
#endif
|
0b98b70448e5133f30617014e70d3846811e691b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#define ARRAY_SIZE 64
__global__ void SquareKernel(float *d_out, float *d_in)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float val = d_in[idx];
d_out[idx] = val * val;
}
int main()
{
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// Allocate the array on the host
float *h_in, *h_out;
h_in = (float *) malloc(ARRAY_BYTES);
h_out = (float *) malloc(ARRAY_BYTES);
for (int i = 0; i < ARRAY_SIZE; i++)
{
h_in[i] = (float) i;
}
// Allocate arrays onto the device
float *d_in, *d_out;
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_out, ARRAY_BYTES);
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
// Launch the kernel
dim3 gridDim(2, 1, 1); // 2 blocks of ARRAY_SIZE/2 threads = ARRAY_SIZE threads in total
dim3 blockDim(ARRAY_SIZE / 2, 1, 1);
hipLaunchKernelGGL(( SquareKernel), dim3(gridDim), dim3(blockDim), 0, 0, d_out, d_in);
// Copy back the results
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
// Print the results
for (int i = 0; i < ARRAY_SIZE; i++)
{
printf ("%f", h_out[i]);
printf ((i % 4 == 3)? "\n" : "\t");
}
free(h_in);
free(h_out);
hipFree(d_in);
hipFree(d_out);
return 0;
} | 0b98b70448e5133f30617014e70d3846811e691b.cu | #include <stdio.h>
#include <cuda.h>
#define ARRAY_SIZE 64
__global__ void SquareKernel(float *d_out, float *d_in)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float val = d_in[idx];
d_out[idx] = val * val;
}
int main()
{
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// Allocate the array on the host
float *h_in, *h_out;
h_in = (float *) malloc(ARRAY_BYTES);
h_out = (float *) malloc(ARRAY_BYTES);
for (int i = 0; i < ARRAY_SIZE; i++)
{
h_in[i] = (float) i;
}
// Allocate arrays onto the device
float *d_in, *d_out;
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// Launch the kernel
dim3 gridDim(2, 1, 1); // 2 blocks of ARRAY_SIZE/2 threads = ARRAY_SIZE threads in total
dim3 blockDim(ARRAY_SIZE / 2, 1, 1);
SquareKernel<<<gridDim, blockDim>>> (d_out, d_in);
// Copy back the results
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// Print the results
for (int i = 0; i < ARRAY_SIZE; i++)
{
printf ("%f", h_out[i]);
printf ((i % 4 == 3)? "\n" : "\t");
}
free(h_in);
free(h_out);
cudaFree(d_in);
cudaFree(d_out);
return 0;
} |
52c17749db337150209219d924b970b0cdd7ac19.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdlib>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <sys/time.h>
double get_time() {
struct timeval tv;
gettimeofday(&tv, nullptr);
return tv.tv_sec + 1e-6 * tv.tv_usec;
}
constexpr int m = 256;
inline __device__ int indirect(int *c, int i) {
// return c[c[i & 127] & 127] + i;
return int(exp(((((float(i))))) * 1e-18)) + i;
// printf("%d\n", c[i % m] - i % m + i - i);
// return i;
}
__constant__ int const_c[m];
__global__ void fd(float *a, float *b, int *c, int n) {
__shared__ float b_s[m];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < m) {
b_s[i] = 0;
}
__syncthreads();
/*
if (threadIdx.x < m) {
b_s[threadIdx.x] = c[threadIdx.x];
}
__syncthreads();
*/
/*
float sum = 0;
if (i > 0)
sum += a[indirect(c, i) - 1];
*/
// sum += a[indirect(c, i)];
// sum += a[i + b_s[i & 127]];
/*
if (i < n - 1)
sum += a[indirect(c, i) + 1];
*/
// b[i] = (i * 1e-18);
// b[i] = i;
// b[i] = c[c[c[i & 64]]];
// atomicAdd(b_s + ((unsigned)i * 34252345627) % m, 1.0f);
// i = int(((((i * 1e-20f)))));
// i = (i * 1e-10f);
// i = i * i * i * i * i % m;
// b_s[i % m] = 1;
//#define C(x) i += (i >> x);
//#define C(x) i += (i >> x);
// for (int t = 0; t < 240; t++)
// C(30);
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
for (int j = 0; j < 27; j++) {
atomicAdd(b_s + (unsigned int)(i / 4 + j * 431) % (m / 1), 1.0f);
}
__syncthreads();
if (i < m) {
atomicAdd(&b[i], b_s[i]);
}
// atomicAdd(b + i % (m * m), 1);
/*
atomicAdd(&b_s[0], sqrt(sum));
if (threadIdx.x < m) {
atomicAdd(b + threadIdx.x, b_s[threadIdx.x]);
// b[threadIdx.x] += b_s[threadIdx.x];
}
*/
}
int main() {
int n = 128 * 1024 * 1024;
float *a, *b;
int *c;
hipMallocManaged(&a, n * sizeof(float));
hipMallocManaged(&b, n * sizeof(float));
hipMallocManaged(&c, m * sizeof(float));
for (int i = 0; i < n; i++) {
a[i] = i * 1e-5f;
}
for (int i = 0; i < n; i++) {
b[i] = i * 1e-5f;
}
for (int i = 0; i < m; i++) {
c[i] = 0;
}
hipMemcpyToSymbol(const_c, c, m * sizeof(float), 0, hipMemcpyHostToDevice);
for (auto bs : {256, 512, 1024}) {
std::cout << "bs = " << bs << std::endl;
for (int i = 0; i < 4; i++) {
auto t = get_time();
hipLaunchKernelGGL(( fd), dim3(n / bs), dim3(bs), 0, 0, a, b, c, n);
hipDeviceSynchronize();
t = get_time() - t;
printf("%.2f ms bw %.3f GB/s\n", t * 1000,
n * 2.0f * 4 / t / (1024 * 1024 * 1024.0f));
}
std::cout << std::endl;
}
}
| 52c17749db337150209219d924b970b0cdd7ac19.cu | #include <iostream>
#include <cstdlib>
#include <cstdio>
#include <cuda_runtime.h>
#include <sys/time.h>
double get_time() {
struct timeval tv;
gettimeofday(&tv, nullptr);
return tv.tv_sec + 1e-6 * tv.tv_usec;
}
constexpr int m = 256;
inline __device__ int indirect(int *c, int i) {
// return c[c[i & 127] & 127] + i;
return int(exp(((((float(i))))) * 1e-18)) + i;
// printf("%d\n", c[i % m] - i % m + i - i);
// return i;
}
__constant__ int const_c[m];
__global__ void fd(float *a, float *b, int *c, int n) {
__shared__ float b_s[m];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < m) {
b_s[i] = 0;
}
__syncthreads();
/*
if (threadIdx.x < m) {
b_s[threadIdx.x] = c[threadIdx.x];
}
__syncthreads();
*/
/*
float sum = 0;
if (i > 0)
sum += a[indirect(c, i) - 1];
*/
// sum += a[indirect(c, i)];
// sum += a[i + b_s[i & 127]];
/*
if (i < n - 1)
sum += a[indirect(c, i) + 1];
*/
// b[i] = (i * 1e-18);
// b[i] = i;
// b[i] = c[c[c[i & 64]]];
// atomicAdd(b_s + ((unsigned)i * 34252345627) % m, 1.0f);
// i = int(((((i * 1e-20f)))));
// i = (i * 1e-10f);
// i = i * i * i * i * i % m;
// b_s[i % m] = 1;
//#define C(x) i += (i >> x);
//#define C(x) i += (i >> x);
// for (int t = 0; t < 240; t++)
// C(30);
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
for (int j = 0; j < 27; j++) {
atomicAdd(b_s + (unsigned int)(i / 4 + j * 431) % (m / 1), 1.0f);
}
__syncthreads();
if (i < m) {
atomicAdd(&b[i], b_s[i]);
}
// atomicAdd(b + i % (m * m), 1);
/*
atomicAdd(&b_s[0], sqrt(sum));
if (threadIdx.x < m) {
atomicAdd(b + threadIdx.x, b_s[threadIdx.x]);
// b[threadIdx.x] += b_s[threadIdx.x];
}
*/
}
int main() {
int n = 128 * 1024 * 1024;
float *a, *b;
int *c;
cudaMallocManaged(&a, n * sizeof(float));
cudaMallocManaged(&b, n * sizeof(float));
cudaMallocManaged(&c, m * sizeof(float));
for (int i = 0; i < n; i++) {
a[i] = i * 1e-5f;
}
for (int i = 0; i < n; i++) {
b[i] = i * 1e-5f;
}
for (int i = 0; i < m; i++) {
c[i] = 0;
}
cudaMemcpyToSymbol(const_c, c, m * sizeof(float), 0, cudaMemcpyHostToDevice);
for (auto bs : {256, 512, 1024}) {
std::cout << "bs = " << bs << std::endl;
for (int i = 0; i < 4; i++) {
auto t = get_time();
fd<<<n / bs, bs>>>(a, b, c, n);
cudaDeviceSynchronize();
t = get_time() - t;
printf("%.2f ms bw %.3f GB/s\n", t * 1000,
n * 2.0f * 4 / t / (1024 * 1024 * 1024.0f));
}
std::cout << std::endl;
}
}
|
e9814b72a48e8e6d8f6830ca41106a069095004b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/common/logger.hpp>
#include <cuml/neighbors/knn_sparse.hpp>
#include <raft/sparse/selection/knn.cuh>
#include <cusparse_v2.h>
namespace ML {
namespace Sparse {
void brute_force_knn(raft::handle_t &handle, const int *idx_indptr,
const int *idx_indices, const float *idx_data,
size_t idx_nnz, int n_idx_rows, int n_idx_cols,
const int *query_indptr, const int *query_indices,
const float *query_data, size_t query_nnz,
int n_query_rows, int n_query_cols, int *output_indices,
float *output_dists, int k,
size_t batch_size_index, // approx 1M
size_t batch_size_query,
raft::distance::DistanceType metric, float metricArg) {
auto d_alloc = handle.get_device_allocator();
hipsparseHandle_t cusparse_handle = handle.get_cusparse_handle();
hipStream_t stream = handle.get_stream();
raft::sparse::selection::brute_force_knn(
idx_indptr, idx_indices, idx_data, idx_nnz, n_idx_rows, n_idx_cols,
query_indptr, query_indices, query_data, query_nnz, n_query_rows,
n_query_cols, output_indices, output_dists, k, cusparse_handle, d_alloc,
stream, batch_size_index, batch_size_query, metric, metricArg);
}
}; // namespace Sparse
}; // namespace ML
| e9814b72a48e8e6d8f6830ca41106a069095004b.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/common/logger.hpp>
#include <cuml/neighbors/knn_sparse.hpp>
#include <raft/sparse/selection/knn.cuh>
#include <cusparse_v2.h>
namespace ML {
namespace Sparse {
void brute_force_knn(raft::handle_t &handle, const int *idx_indptr,
const int *idx_indices, const float *idx_data,
size_t idx_nnz, int n_idx_rows, int n_idx_cols,
const int *query_indptr, const int *query_indices,
const float *query_data, size_t query_nnz,
int n_query_rows, int n_query_cols, int *output_indices,
float *output_dists, int k,
size_t batch_size_index, // approx 1M
size_t batch_size_query,
raft::distance::DistanceType metric, float metricArg) {
auto d_alloc = handle.get_device_allocator();
cusparseHandle_t cusparse_handle = handle.get_cusparse_handle();
cudaStream_t stream = handle.get_stream();
raft::sparse::selection::brute_force_knn(
idx_indptr, idx_indices, idx_data, idx_nnz, n_idx_rows, n_idx_cols,
query_indptr, query_indices, query_data, query_nnz, n_query_rows,
n_query_cols, output_indices, output_dists, k, cusparse_handle, d_alloc,
stream, batch_size_index, batch_size_query, metric, metricArg);
}
}; // namespace Sparse
}; // namespace ML
|
f4bbf2ce140303ab16753e7f6454fbe17737c185.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_advec_cell_kernel2_ydir;
int xdim0_advec_cell_kernel2_ydir_h = -1;
__constant__ int ydim0_advec_cell_kernel2_ydir;
int ydim0_advec_cell_kernel2_ydir_h = -1;
__constant__ int xdim1_advec_cell_kernel2_ydir;
int xdim1_advec_cell_kernel2_ydir_h = -1;
__constant__ int ydim1_advec_cell_kernel2_ydir;
int ydim1_advec_cell_kernel2_ydir_h = -1;
__constant__ int xdim2_advec_cell_kernel2_ydir;
int xdim2_advec_cell_kernel2_ydir_h = -1;
__constant__ int ydim2_advec_cell_kernel2_ydir;
int ydim2_advec_cell_kernel2_ydir_h = -1;
__constant__ int xdim3_advec_cell_kernel2_ydir;
int xdim3_advec_cell_kernel2_ydir_h = -1;
__constant__ int ydim3_advec_cell_kernel2_ydir;
int ydim3_advec_cell_kernel2_ydir_h = -1;
__constant__ int xdim4_advec_cell_kernel2_ydir;
int xdim4_advec_cell_kernel2_ydir_h = -1;
__constant__ int ydim4_advec_cell_kernel2_ydir;
int ydim4_advec_cell_kernel2_ydir_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#define OPS_ACC0(x,y,z) (x+xdim0_advec_cell_kernel2_ydir*(y)+xdim0_advec_cell_kernel2_ydir*ydim0_advec_cell_kernel2_ydir*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_advec_cell_kernel2_ydir*(y)+xdim1_advec_cell_kernel2_ydir*ydim1_advec_cell_kernel2_ydir*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_advec_cell_kernel2_ydir*(y)+xdim2_advec_cell_kernel2_ydir*ydim2_advec_cell_kernel2_ydir*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_advec_cell_kernel2_ydir*(y)+xdim3_advec_cell_kernel2_ydir*ydim3_advec_cell_kernel2_ydir*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_advec_cell_kernel2_ydir*(y)+xdim4_advec_cell_kernel2_ydir*ydim4_advec_cell_kernel2_ydir*(z))
//user function
__device__
inline void advec_cell_kernel2_ydir_gpu( double *pre_vol, double *post_vol, const double *volume,
const double *vol_flux_y, const double *vol_flux_x) {
pre_vol[OPS_ACC0(0,0,0)] = volume[OPS_ACC2(0,0,0)] + vol_flux_y[OPS_ACC3(0,1,0)] - vol_flux_y[OPS_ACC3(0,0,0)]
+ vol_flux_x[OPS_ACC4(1,0,0)] - vol_flux_x[OPS_ACC4(0,0,0)];
post_vol[OPS_ACC1(0,0,0)]= pre_vol[OPS_ACC0(0,0,0)]-(vol_flux_y[OPS_ACC3(0,1,0)] - vol_flux_y[OPS_ACC3(0,0,0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
__global__ void ops_advec_cell_kernel2_ydir(
double* __restrict arg0,
double* __restrict arg1,
const double* __restrict arg2,
const double* __restrict arg3,
const double* __restrict arg4,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_advec_cell_kernel2_ydir + idx_z * 1*1 * xdim0_advec_cell_kernel2_ydir * ydim0_advec_cell_kernel2_ydir;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_advec_cell_kernel2_ydir + idx_z * 1*1 * xdim1_advec_cell_kernel2_ydir * ydim1_advec_cell_kernel2_ydir;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_advec_cell_kernel2_ydir + idx_z * 1*1 * xdim2_advec_cell_kernel2_ydir * ydim2_advec_cell_kernel2_ydir;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_advec_cell_kernel2_ydir + idx_z * 1*1 * xdim3_advec_cell_kernel2_ydir * ydim3_advec_cell_kernel2_ydir;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_advec_cell_kernel2_ydir + idx_z * 1*1 * xdim4_advec_cell_kernel2_ydir * ydim4_advec_cell_kernel2_ydir;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_cell_kernel2_ydir_gpu(arg0, arg1, arg2, arg3,
arg4);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_cell_kernel2_ydir(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4) {
#else
void ops_par_loop_advec_cell_kernel2_ydir_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,5,range,113)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(113,"advec_cell_kernel2_ydir");
OPS_kernels[113].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
if (xdim0 != xdim0_advec_cell_kernel2_ydir_h || ydim0 != ydim0_advec_cell_kernel2_ydir_h || xdim1 != xdim1_advec_cell_kernel2_ydir_h || ydim1 != ydim1_advec_cell_kernel2_ydir_h || xdim2 != xdim2_advec_cell_kernel2_ydir_h || ydim2 != ydim2_advec_cell_kernel2_ydir_h || xdim3 != xdim3_advec_cell_kernel2_ydir_h || ydim3 != ydim3_advec_cell_kernel2_ydir_h || xdim4 != xdim4_advec_cell_kernel2_ydir_h || ydim4 != ydim4_advec_cell_kernel2_ydir_h) {
hipMemcpyToSymbol( xdim0_advec_cell_kernel2_ydir, &xdim0, sizeof(int) );
xdim0_advec_cell_kernel2_ydir_h = xdim0;
hipMemcpyToSymbol( ydim0_advec_cell_kernel2_ydir, &ydim0, sizeof(int) );
ydim0_advec_cell_kernel2_ydir_h = ydim0;
hipMemcpyToSymbol( xdim1_advec_cell_kernel2_ydir, &xdim1, sizeof(int) );
xdim1_advec_cell_kernel2_ydir_h = xdim1;
hipMemcpyToSymbol( ydim1_advec_cell_kernel2_ydir, &ydim1, sizeof(int) );
ydim1_advec_cell_kernel2_ydir_h = ydim1;
hipMemcpyToSymbol( xdim2_advec_cell_kernel2_ydir, &xdim2, sizeof(int) );
xdim2_advec_cell_kernel2_ydir_h = xdim2;
hipMemcpyToSymbol( ydim2_advec_cell_kernel2_ydir, &ydim2, sizeof(int) );
ydim2_advec_cell_kernel2_ydir_h = ydim2;
hipMemcpyToSymbol( xdim3_advec_cell_kernel2_ydir, &xdim3, sizeof(int) );
xdim3_advec_cell_kernel2_ydir_h = xdim3;
hipMemcpyToSymbol( ydim3_advec_cell_kernel2_ydir, &ydim3, sizeof(int) );
ydim3_advec_cell_kernel2_ydir_h = ydim3;
hipMemcpyToSymbol( xdim4_advec_cell_kernel2_ydir, &xdim4, sizeof(int) );
xdim4_advec_cell_kernel2_ydir_h = xdim4;
hipMemcpyToSymbol( ydim4_advec_cell_kernel2_ydir, &ydim4, sizeof(int) );
ydim4_advec_cell_kernel2_ydir_h = ydim4;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
char *p_a[5];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 5);
ops_halo_exchanges(args,5,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[113].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_advec_cell_kernel2_ydir), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4],x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[113].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 5);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[113].mpi_time += t2-t1;
OPS_kernels[113].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[113].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[113].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[113].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[113].transfer += ops_compute_transfer(dim, start, end, &arg4);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_cell_kernel2_ydir(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 113;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 113;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 5;
desc->args = (ops_arg*)malloc(5*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->function = ops_par_loop_advec_cell_kernel2_ydir_execute;
if (OPS_diags > 1) {
ops_timing_realloc(113,"advec_cell_kernel2_ydir");
}
ops_enqueue_kernel(desc);
}
#endif
| f4bbf2ce140303ab16753e7f6454fbe17737c185.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_advec_cell_kernel2_ydir;
int xdim0_advec_cell_kernel2_ydir_h = -1;
__constant__ int ydim0_advec_cell_kernel2_ydir;
int ydim0_advec_cell_kernel2_ydir_h = -1;
__constant__ int xdim1_advec_cell_kernel2_ydir;
int xdim1_advec_cell_kernel2_ydir_h = -1;
__constant__ int ydim1_advec_cell_kernel2_ydir;
int ydim1_advec_cell_kernel2_ydir_h = -1;
__constant__ int xdim2_advec_cell_kernel2_ydir;
int xdim2_advec_cell_kernel2_ydir_h = -1;
__constant__ int ydim2_advec_cell_kernel2_ydir;
int ydim2_advec_cell_kernel2_ydir_h = -1;
__constant__ int xdim3_advec_cell_kernel2_ydir;
int xdim3_advec_cell_kernel2_ydir_h = -1;
__constant__ int ydim3_advec_cell_kernel2_ydir;
int ydim3_advec_cell_kernel2_ydir_h = -1;
__constant__ int xdim4_advec_cell_kernel2_ydir;
int xdim4_advec_cell_kernel2_ydir_h = -1;
__constant__ int ydim4_advec_cell_kernel2_ydir;
int ydim4_advec_cell_kernel2_ydir_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#define OPS_ACC0(x,y,z) (x+xdim0_advec_cell_kernel2_ydir*(y)+xdim0_advec_cell_kernel2_ydir*ydim0_advec_cell_kernel2_ydir*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_advec_cell_kernel2_ydir*(y)+xdim1_advec_cell_kernel2_ydir*ydim1_advec_cell_kernel2_ydir*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_advec_cell_kernel2_ydir*(y)+xdim2_advec_cell_kernel2_ydir*ydim2_advec_cell_kernel2_ydir*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_advec_cell_kernel2_ydir*(y)+xdim3_advec_cell_kernel2_ydir*ydim3_advec_cell_kernel2_ydir*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_advec_cell_kernel2_ydir*(y)+xdim4_advec_cell_kernel2_ydir*ydim4_advec_cell_kernel2_ydir*(z))
//user function
__device__
inline void advec_cell_kernel2_ydir_gpu( double *pre_vol, double *post_vol, const double *volume,
const double *vol_flux_y, const double *vol_flux_x) {
pre_vol[OPS_ACC0(0,0,0)] = volume[OPS_ACC2(0,0,0)] + vol_flux_y[OPS_ACC3(0,1,0)] - vol_flux_y[OPS_ACC3(0,0,0)]
+ vol_flux_x[OPS_ACC4(1,0,0)] - vol_flux_x[OPS_ACC4(0,0,0)];
post_vol[OPS_ACC1(0,0,0)]= pre_vol[OPS_ACC0(0,0,0)]-(vol_flux_y[OPS_ACC3(0,1,0)] - vol_flux_y[OPS_ACC3(0,0,0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
__global__ void ops_advec_cell_kernel2_ydir(
double* __restrict arg0,
double* __restrict arg1,
const double* __restrict arg2,
const double* __restrict arg3,
const double* __restrict arg4,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_advec_cell_kernel2_ydir + idx_z * 1*1 * xdim0_advec_cell_kernel2_ydir * ydim0_advec_cell_kernel2_ydir;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_advec_cell_kernel2_ydir + idx_z * 1*1 * xdim1_advec_cell_kernel2_ydir * ydim1_advec_cell_kernel2_ydir;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_advec_cell_kernel2_ydir + idx_z * 1*1 * xdim2_advec_cell_kernel2_ydir * ydim2_advec_cell_kernel2_ydir;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_advec_cell_kernel2_ydir + idx_z * 1*1 * xdim3_advec_cell_kernel2_ydir * ydim3_advec_cell_kernel2_ydir;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_advec_cell_kernel2_ydir + idx_z * 1*1 * xdim4_advec_cell_kernel2_ydir * ydim4_advec_cell_kernel2_ydir;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_cell_kernel2_ydir_gpu(arg0, arg1, arg2, arg3,
arg4);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_cell_kernel2_ydir(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4) {
#else
void ops_par_loop_advec_cell_kernel2_ydir_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,5,range,113)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(113,"advec_cell_kernel2_ydir");
OPS_kernels[113].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
if (xdim0 != xdim0_advec_cell_kernel2_ydir_h || ydim0 != ydim0_advec_cell_kernel2_ydir_h || xdim1 != xdim1_advec_cell_kernel2_ydir_h || ydim1 != ydim1_advec_cell_kernel2_ydir_h || xdim2 != xdim2_advec_cell_kernel2_ydir_h || ydim2 != ydim2_advec_cell_kernel2_ydir_h || xdim3 != xdim3_advec_cell_kernel2_ydir_h || ydim3 != ydim3_advec_cell_kernel2_ydir_h || xdim4 != xdim4_advec_cell_kernel2_ydir_h || ydim4 != ydim4_advec_cell_kernel2_ydir_h) {
cudaMemcpyToSymbol( xdim0_advec_cell_kernel2_ydir, &xdim0, sizeof(int) );
xdim0_advec_cell_kernel2_ydir_h = xdim0;
cudaMemcpyToSymbol( ydim0_advec_cell_kernel2_ydir, &ydim0, sizeof(int) );
ydim0_advec_cell_kernel2_ydir_h = ydim0;
cudaMemcpyToSymbol( xdim1_advec_cell_kernel2_ydir, &xdim1, sizeof(int) );
xdim1_advec_cell_kernel2_ydir_h = xdim1;
cudaMemcpyToSymbol( ydim1_advec_cell_kernel2_ydir, &ydim1, sizeof(int) );
ydim1_advec_cell_kernel2_ydir_h = ydim1;
cudaMemcpyToSymbol( xdim2_advec_cell_kernel2_ydir, &xdim2, sizeof(int) );
xdim2_advec_cell_kernel2_ydir_h = xdim2;
cudaMemcpyToSymbol( ydim2_advec_cell_kernel2_ydir, &ydim2, sizeof(int) );
ydim2_advec_cell_kernel2_ydir_h = ydim2;
cudaMemcpyToSymbol( xdim3_advec_cell_kernel2_ydir, &xdim3, sizeof(int) );
xdim3_advec_cell_kernel2_ydir_h = xdim3;
cudaMemcpyToSymbol( ydim3_advec_cell_kernel2_ydir, &ydim3, sizeof(int) );
ydim3_advec_cell_kernel2_ydir_h = ydim3;
cudaMemcpyToSymbol( xdim4_advec_cell_kernel2_ydir, &xdim4, sizeof(int) );
xdim4_advec_cell_kernel2_ydir_h = xdim4;
cudaMemcpyToSymbol( ydim4_advec_cell_kernel2_ydir, &ydim4, sizeof(int) );
ydim4_advec_cell_kernel2_ydir_h = ydim4;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
char *p_a[5];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 5);
ops_halo_exchanges(args,5,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[113].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_advec_cell_kernel2_ydir<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[113].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 5);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[113].mpi_time += t2-t1;
OPS_kernels[113].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[113].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[113].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[113].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[113].transfer += ops_compute_transfer(dim, start, end, &arg4);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_cell_kernel2_ydir(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 113;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 113;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 5;
desc->args = (ops_arg*)malloc(5*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->function = ops_par_loop_advec_cell_kernel2_ydir_execute;
if (OPS_diags > 1) {
ops_timing_realloc(113,"advec_cell_kernel2_ydir");
}
ops_enqueue_kernel(desc);
}
#endif
|
94faa4d7859effc960fbc4c9338b1772f1e7446a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_yvel_plus_4_front;
int xdim0_update_halo_kernel2_yvel_plus_4_front_h = -1;
__constant__ int ydim0_update_halo_kernel2_yvel_plus_4_front;
int ydim0_update_halo_kernel2_yvel_plus_4_front_h = -1;
__constant__ int xdim1_update_halo_kernel2_yvel_plus_4_front;
int xdim1_update_halo_kernel2_yvel_plus_4_front_h = -1;
__constant__ int ydim1_update_halo_kernel2_yvel_plus_4_front;
int ydim1_update_halo_kernel2_yvel_plus_4_front_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_yvel_plus_4_front * (y) + \
xdim0_update_halo_kernel2_yvel_plus_4_front * \
ydim0_update_halo_kernel2_yvel_plus_4_front * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_yvel_plus_4_front * (y) + \
xdim1_update_halo_kernel2_yvel_plus_4_front * \
ydim1_update_halo_kernel2_yvel_plus_4_front * (z))
// user function
__device__
inline void
update_halo_kernel2_yvel_plus_4_front(double *yvel0, double *yvel1,
const int *fields) {
if (fields[FIELD_YVEL0] == 1)
yvel0[OPS_ACC0(0, 0, 0)] = yvel0[OPS_ACC0(0, 0, -4)];
if (fields[FIELD_YVEL1] == 1)
yvel1[OPS_ACC1(0, 0, 0)] = yvel1[OPS_ACC1(0, 0, -4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
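// device wrapper: one thread per halo point; each data pointer is first advanced to that thread's
// element, then the user function copies the value from four z-planes behind (z offset -4)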
__global__ void ops_update_halo_kernel2_yvel_plus_4_front(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_yvel_plus_4_front +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_yvel_plus_4_front *
ydim0_update_halo_kernel2_yvel_plus_4_front;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_yvel_plus_4_front +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_yvel_plus_4_front *
ydim1_update_halo_kernel2_yvel_plus_4_front;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_yvel_plus_4_front(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_yvel_plus_4_front(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 91))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(91, "update_halo_kernel2_yvel_plus_4_front");
OPS_kernels[91].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_yvel_plus_4_front_h ||
ydim0 != ydim0_update_halo_kernel2_yvel_plus_4_front_h ||
xdim1 != xdim1_update_halo_kernel2_yvel_plus_4_front_h ||
ydim1 != ydim1_update_halo_kernel2_yvel_plus_4_front_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel2_yvel_plus_4_front, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_yvel_plus_4_front_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel2_yvel_plus_4_front, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_yvel_plus_4_front_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel2_yvel_plus_4_front, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_yvel_plus_4_front_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel2_yvel_plus_4_front, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_yvel_plus_4_front_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[91].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_yvel_plus_4_front), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[91].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[91].mpi_time += t2 - t1;
OPS_kernels[91].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[91].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| 94faa4d7859effc960fbc4c9338b1772f1e7446a.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_yvel_plus_4_front;
int xdim0_update_halo_kernel2_yvel_plus_4_front_h = -1;
__constant__ int ydim0_update_halo_kernel2_yvel_plus_4_front;
int ydim0_update_halo_kernel2_yvel_plus_4_front_h = -1;
__constant__ int xdim1_update_halo_kernel2_yvel_plus_4_front;
int xdim1_update_halo_kernel2_yvel_plus_4_front_h = -1;
__constant__ int ydim1_update_halo_kernel2_yvel_plus_4_front;
int ydim1_update_halo_kernel2_yvel_plus_4_front_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_yvel_plus_4_front * (y) + \
xdim0_update_halo_kernel2_yvel_plus_4_front * \
ydim0_update_halo_kernel2_yvel_plus_4_front * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_yvel_plus_4_front * (y) + \
xdim1_update_halo_kernel2_yvel_plus_4_front * \
ydim1_update_halo_kernel2_yvel_plus_4_front * (z))
// user function
__device__
inline void
update_halo_kernel2_yvel_plus_4_front(double *yvel0, double *yvel1,
const int *fields) {
if (fields[FIELD_YVEL0] == 1)
yvel0[OPS_ACC0(0, 0, 0)] = yvel0[OPS_ACC0(0, 0, -4)];
if (fields[FIELD_YVEL1] == 1)
yvel1[OPS_ACC1(0, 0, 0)] = yvel1[OPS_ACC1(0, 0, -4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
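// device wrapper: one thread per halo point; each data pointer is first advanced to that thread's
// element, then the user function copies the value from four z-planes behind (z offset -4)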
__global__ void ops_update_halo_kernel2_yvel_plus_4_front(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_yvel_plus_4_front +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_yvel_plus_4_front *
ydim0_update_halo_kernel2_yvel_plus_4_front;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_yvel_plus_4_front +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_yvel_plus_4_front *
ydim1_update_halo_kernel2_yvel_plus_4_front;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_yvel_plus_4_front(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_yvel_plus_4_front(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 91))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(91, "update_halo_kernel2_yvel_plus_4_front");
OPS_kernels[91].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_yvel_plus_4_front_h ||
ydim0 != ydim0_update_halo_kernel2_yvel_plus_4_front_h ||
xdim1 != xdim1_update_halo_kernel2_yvel_plus_4_front_h ||
ydim1 != ydim1_update_halo_kernel2_yvel_plus_4_front_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel2_yvel_plus_4_front, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_yvel_plus_4_front_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_yvel_plus_4_front, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_yvel_plus_4_front_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel2_yvel_plus_4_front, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_yvel_plus_4_front_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel2_yvel_plus_4_front, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_yvel_plus_4_front_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[91].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_yvel_plus_4_front<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[91].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[91].mpi_time += t2 - t1;
OPS_kernels[91].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[91].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
0c4390588380cd490fd46eeba9efd851a57047fd.hip | // !!! This is a file automatically generated by hipify!!!
/*
Kam Pui So (Anthony)
CS510 GPU
Project Group A
Application:
Matrix multiplication (adapted from the matrix addition example in the CUDA Toolkit documentation)
*/
#include <sys/time.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime_api.h>
//global
//const int TESTSIZE[] = {1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024};
//const int MAX_TEST = 11;
const int TEST_LIMIT = 64;
const float MAX_FLOAT = 3.14f;
const int REPEAT = 256;
const int THREAD_LIMIT = 1024;
const int DIMX = 509;
const int DIMM = 1021;
const int DIMY = 2039;
const int SIZE = 32;
// row major matrix struct
typedef struct {
int width;
int height;
float* elements;
} matrix;
typedef struct{
int x;
int y;
} blocksize;
// print divider
void printDivider() {
printf("-------------------------------\n");
}
// create a matrix filled with random values
void createRandomMatrix(matrix sourceMatrix) {
int height = sourceMatrix.height;
int width = sourceMatrix.width;
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
// (*sourceMatrix).elements[(y * width) + x] = ((float) x+y) * 0.1;
sourceMatrix.elements[(y * width) + x] = (float) rand() / (float) (RAND_MAX/MAX_FLOAT);
}
}
}
// print matrix
void printMatrix(const matrix valueMatrix) {
int height = valueMatrix.height;
int width = valueMatrix.width;
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
printf("%.2f ", valueMatrix.elements[(y * width) + x]);
}
printf("\n");
}
printDivider();
}
// sequential matrix multiplication
void mulMatrix(const matrix A, const matrix B, matrix result) {
int height = result.height;
int intrim = A.width;
int width = result.width;
float value = 0.0f;
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
value = 0.0f;
for (int i = 0; i < intrim; ++i) {
value += A.elements[y * intrim + i] * B.elements[i * width + x];
}
result.elements[y * width + x] = value;
}
}
}
// print error code
void printError(char *message, hipError_t error) {
char errorString[255];
strcpy(errorString, hipGetErrorString(error));
if (strcmp(errorString, "no error") != 0) // strcmp() returns 0 on a match, so any nonzero value is a real error
printf("%s: %s\n", message, hipGetErrorString(error));
}
// Kernel code - matrix multiplication
// A x B = C
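// Each thread computes a single element of C as the dot product of one row of A with one
// column of B, read directly from global memory (no shared-memory tiling)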
__global__ void matrixMulKernel(const matrix A, const matrix B, matrix C) {
int height = C.height;
int intrim = A.width;
int width = C.width;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float value = 0;
// check if row & col are within matrix size
if ((row >= height) || (col >= width)) return; // valid indices are 0..height-1 and 0..width-1
for (int i = 0; i < intrim; ++i)
value += A.elements[row * intrim + i] * B.elements[i * width + col];
C.elements[row * width + col] = value;
}
// Host code - matrix multiplication
// A x B = C
// block size is determined at runtime
void matrixMulHost(const matrix A, const matrix B, matrix C, const blocksize dimension) {
// variable declaration
matrix A_device, B_device, C_device;
hipError_t err;
int height = C.height;
int intrim = A.width;
int width = C.width;
size_t size;
A_device.width = B_device.height = intrim;
B_device.width = C_device.width = width;
A_device.height = C_device.height = height;
// profiler start
hipProfilerStart();
// load A and B to device memory
size = height * intrim * sizeof(float);
err = hipMalloc(&A_device.elements, size);
printError("CUDA malloc A", err);
err = hipMemcpy(A_device.elements, A.elements, size, hipMemcpyHostToDevice);
printError("Copy A to device", err);
size = intrim * width * sizeof(float);
err = hipMalloc(&B_device.elements, size);
printError("CUDA malloc B", err);
err = hipMemcpy(B_device.elements, B.elements, size, hipMemcpyHostToDevice);
printError("Copy B to device", err);
// allocate C in device memory
size = height * width * sizeof(float);
err = hipMalloc(&C_device.elements, size);
printError("CUDA malloc C", err);
// invoke kernel
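// grid dimensions use ceiling division so that partial blocks cover the matrix edges;
// the kernel's bounds check discards the surplus threads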
dim3 dimBlock(dimension.x, dimension.y);
dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x, (height + dimBlock.y - 1) / dimBlock.y);
hipLaunchKernelGGL(( matrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, A_device, B_device, C_device);
err = hipDeviceSynchronize();
printError("Run kernel", err);
// read C back from device memory
err = hipMemcpy(C.elements, C_device.elements, size, hipMemcpyDeviceToHost);
printError("Copy C off of device", err);
// free device memory
hipFree(A_device.elements);
hipFree(B_device.elements);
hipFree(C_device.elements);
// profiler stop
hipProfilerStop();
}
// print result
void printResult(const timeval start, const timeval end, const blocksize testSize) {
printf("Result (x y micro-second), %d, %d, %ld\n", testSize.x, testSize.y, ((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec )));
}
// run sizing test on kernel
void runSizeTest(const matrix A, const matrix B, matrix C) {
blocksize currentSize;
int i = 0;
// int x, y;
struct timeval start, end;
// set up test loop
while ( i < REPEAT) {
/*
x = rand() % MAX_TEST;
y = rand() % MAX_TEST;
currentSize.x = TESTSIZE[x];
currentSize.y = TESTSIZE[y];
*/
currentSize.x = SIZE;
do {
currentSize.y = rand() % TEST_LIMIT + 1;
} while ((currentSize.x * currentSize.y) > THREAD_LIMIT);
gettimeofday(&start, NULL);
matrixMulHost(A, B, C, currentSize);
gettimeofday(&end, NULL);
printResult(start, end, currentSize);
// printMatrix(C);
++i;
}
}
// main function
// usage: ./a.out  (A.height, A.width and B.width are currently hard-coded below; the argv parsing is commented out)
int main (int argc, char*argv[]) {
matrix A, B, C;
// int dimX = atoi(argv[1]);
// int dimM = atoi(argv[2]);
// int dimY = atoi(argv[3]);
int dimX = DIMX;
int dimM = DIMM;
int dimY = DIMY;
printf("dimensions, %d, %d, %d\n", dimX, dimM, dimY);
// initialize random seed
srand(time(NULL));
// setup initial matrix
A.height = dimX;
A.width = dimM;
A.elements = (float*) malloc(dimX * dimM * sizeof(float));
B.height = dimM;
B.width = dimY;
B.elements = (float*) malloc(dimM * dimY * sizeof(float));
C.height = dimX;
C.width = dimY;
C.elements = (float*) malloc(dimX * dimY * sizeof(float));
// create random matrix
createRandomMatrix(A);
createRandomMatrix(B);
// print initial matrix
// printMatrix(A);
// printMatrix(B);
// traditional (sequential CPU) multiplication
// mulMatrix(A, B, C);
// CUDA multiplication
runSizeTest(A, B, C);
// printMatrix(C);
// free matrix
free(A.elements);
free(B.elements);
free(C.elements);
return 0;
}
| 0c4390588380cd490fd46eeba9efd851a57047fd.cu | /*
Kam Pui So (Anthony)
CS510 GPU
Project Group A
Application:
Matrix multiplication (adapted from the matrix addition example in the CUDA Toolkit documentation)
*/
#include <sys/time.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <cuda_profiler_api.h>
//global
//const int TESTSIZE[] = {1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024};
//const int MAX_TEST = 11;
const int TEST_LIMIT = 64;
const float MAX_FLOAT = 3.14f;
const int REPEAT = 256;
const int THREAD_LIMIT = 1024;
const int DIMX = 509;
const int DIMM = 1021;
const int DIMY = 2039;
const int SIZE = 32;
// row major matrix struct
typedef struct {
int width;
int height;
float* elements;
} matrix;
typedef struct{
int x;
int y;
} blocksize;
// print divider
void printDivider() {
printf("-------------------------------\n");
}
// create a matrix filled with random values
void createRandomMatrix(matrix sourceMatrix) {
int height = sourceMatrix.height;
int width = sourceMatrix.width;
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
// (*sourceMatrix).elements[(y * width) + x] = ((float) x+y) * 0.1;
sourceMatrix.elements[(y * width) + x] = (float) rand() / (float) (RAND_MAX/MAX_FLOAT);
}
}
}
// print matrix
void printMatrix(const matrix valueMatrix) {
int height = valueMatrix.height;
int width = valueMatrix.width;
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
printf("%.2f ", valueMatrix.elements[(y * width) + x]);
}
printf("\n");
}
printDivider();
}
// sequential matrix multiplication
void mulMatrix(const matrix A, const matrix B, matrix result) {
int height = result.height;
int intrim = A.width;
int width = result.width;
float value = 0.0f;
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
value = 0.0f;
for (int i = 0; i < intrim; ++i) {
value += A.elements[y * intrim + i] * B.elements[i * width + x];
}
result.elements[y * width + x] = value;
}
}
}
// print error code
void printError(char *message, cudaError_t error) {
char errorString[255];
strcpy(errorString, cudaGetErrorString(error));
if (strcmp(errorString, "no error") != 0) // strcmp() returns 0 on a match, so any nonzero value is a real error
printf("%s: %s\n", message, cudaGetErrorString(error));
}
// Kernel code - matrix multiplication
// A x B = C
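// Each thread computes a single element of C as the dot product of one row of A with one
// column of B, read directly from global memory (no shared-memory tiling)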
__global__ void matrixMulKernel(const matrix A, const matrix B, matrix C) {
int height = C.height;
int intrim = A.width;
int width = C.width;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float value = 0;
// check if row & col are within matrix size
if ((row >= height) || (col >= width)) return; // valid indices are 0..height-1 and 0..width-1
for (int i = 0; i < intrim; ++i)
value += A.elements[row * intrim + i] * B.elements[i * width + col];
C.elements[row * width + col] = value;
}
// Host code - matrix multiplication
// A x B = C
// block size is determined at runtime
void matrixMulHost(const matrix A, const matrix B, matrix C, const blocksize dimension) {
// variable declaration
matrix A_device, B_device, C_device;
cudaError_t err;
int height = C.height;
int intrim = A.width;
int width = C.width;
size_t size;
A_device.width = B_device.height = intrim;
B_device.width = C_device.width = width;
A_device.height = C_device.height = height;
// profiler start
cudaProfilerStart();
// load A and B to device memory
size = height * intrim * sizeof(float);
err = cudaMalloc(&A_device.elements, size);
printError("CUDA malloc A", err);
err = cudaMemcpy(A_device.elements, A.elements, size, cudaMemcpyHostToDevice);
printError("Copy A to device", err);
size = intrim * width * sizeof(float);
err = cudaMalloc(&B_device.elements, size);
printError("CUDA malloc B", err);
err = cudaMemcpy(B_device.elements, B.elements, size, cudaMemcpyHostToDevice);
printError("Copy B to device", err);
// allocate C in device memory
size = height * width * sizeof(float);
err = cudaMalloc(&C_device.elements, size);
printError("CUDA malloc C", err);
// invoke kernel
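// grid dimensions use ceiling division so that partial blocks cover the matrix edges;
// the kernel's bounds check discards the surplus threads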
dim3 dimBlock(dimension.x, dimension.y);
dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x, (height + dimBlock.y - 1) / dimBlock.y);
matrixMulKernel<<<dimGrid, dimBlock>>>(A_device, B_device, C_device);
err = cudaThreadSynchronize();
printError("Run kernel", err);
// read C back from device memory
err = cudaMemcpy(C.elements, C_device.elements, size, cudaMemcpyDeviceToHost);
printError("Copy C off of device", err);
// free device memory
cudaFree(A_device.elements);
cudaFree(B_device.elements);
cudaFree(C_device.elements);
// profiler stop
cudaProfilerStop();
}
// print result
void printResult(const timeval start, const timeval end, const blocksize testSize) {
printf("Result (x y micro-second), %d, %d, %ld\n", testSize.x, testSize.y, ((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec )));
}
// run sizing test on kernel
void runSizeTest(const matrix A, const matrix B, matrix C) {
blocksize currentSize;
int i = 0;
// int x, y;
struct timeval start, end;
// set up test loop
while ( i < REPEAT) {
/*
x = rand() % MAX_TEST;
y = rand() % MAX_TEST;
currentSize.x = TESTSIZE[x];
currentSize.y = TESTSIZE[y];
*/
currentSize.x = SIZE;
do {
currentSize.y = rand() % TEST_LIMIT + 1;
} while ((currentSize.x * currentSize.y) > THREAD_LIMIT);
gettimeofday(&start, NULL);
matrixMulHost(A, B, C, currentSize);
gettimeofday(&end, NULL);
printResult(start, end, currentSize);
// printMatrix(C);
++i;
}
}
// main function
// usage: ./a.out  (A.height, A.width and B.width are currently hard-coded below; the argv parsing is commented out)
int main (int argc, char*argv[]) {
matrix A, B, C;
// int dimX = atoi(argv[1]);
// int dimM = atoi(argv[2]);
// int dimY = atoi(argv[3]);
int dimX = DIMX;
int dimM = DIMM;
int dimY = DIMY;
printf("dimensions, %d, %d, %d\n", dimX, dimM, dimY);
// initialize random seed
srand(time(NULL));
// setup initial matrix
A.height = dimX;
A.width = dimM;
A.elements = (float*) malloc(dimX * dimM * sizeof(float));
B.height = dimM;
B.width = dimY;
B.elements = (float*) malloc(dimM * dimY * sizeof(float));
C.height = dimX;
C.width = dimY;
C.elements = (float*) malloc(dimX * dimY * sizeof(float));
// create random matrix
createRandomMatrix(A);
createRandomMatrix(B);
// print initial matrix
// printMatrix(A);
// printMatrix(B);
// traditional (sequential CPU) multiplication
// mulMatrix(A, B, C);
// CUDA multiplication
runSizeTest(A, B, C);
// printMatrix(C);
// free matrix
free(A.elements);
free(B.elements);
free(C.elements);
return 0;
}
|
efecff5d19235f9e6980434efa6db9ddf955c8fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void kernel_add_proj(float *d_a, float *d_b)
{
int idx = blockDim.x * gridDim.x * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
d_a[idx]=d_a[idx]+d_b[idx];
}
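// SART-style correction: subtract the reprojected (length-weighted) value from the measured projection
// and normalize by the ray's summed intersection length; rays that barely graze the volume are zeroed.
// The Number_of_Devices factor appears to rescale the per-device partial results.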
__global__ void kernel_divide_proj(float *h_proj_single, float *h_proj_sumLen, float *h_proj_weightedLen)
{
int idx = blockDim.x * gridDim.x * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
float temp = h_proj_sumLen[idx];
if ( temp < volumn_z*1e-6)
h_proj_single[idx] = 0;
else
{
h_proj_single[idx] = (h_proj_single[idx] - h_proj_weightedLen[idx]) / temp *Number_of_Devices;
}
}
__global__ void forward_ray_driven_3d_kernel_correction_multiGPU(float *d_f , float *d_proj_correction, float *d_proj_data, float sin_theta, float cos_theta, int subPrjIdx, int command)
{
// d_f: 3D object array; d_f[i,j,k] = d_f [k*M*N+j*M+i];
// d_proj_data: 2D projection acquired at the angle of t_theta
// d_proj_correction: 2D projection correction, (output of this function. i.e. c(i) in the paper)
int Detector_x_idx = threadIdx.x + blockDim.x * blockIdx.x;
int Detector_z_idx = blockIdx.y;
int proj_pixel_index = Detector_z_idx * R + Detector_x_idx;
// Source position (X2): coordinate in (x,y,z) system .
float vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
float vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
float vertex_x2_z;
if (FBCT)
vertex_x2_z = Detector_Zmin + (Z_prj/Number_of_Devices*subPrjIdx+Detector_z_idx) * Detector_pixel_x; // FBCT geometry
else
vertex_x2_z = Source_z; // CBCT geometry
// Detector element center positions (X1): Coordinate in (x,y,z) system ---
float vertex_x1_x = DOD * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
float vertex_x1_y = DOD * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
float vertex_x1_z = Detector_Zmin + (Z_prj/Number_of_Devices*subPrjIdx+Detector_z_idx) * Detector_pixel_x;
// Notice: in this system, vertex_x1_x < 0 < vertex_x2_x
float inv_x_diff = 1.0f / (vertex_x2_x - vertex_x1_x);
float inv_y_diff = 1.0f / (vertex_x2_y - vertex_x1_y);
float inv_z_diff = 1.0f / (vertex_x2_z - vertex_x1_z);
/*****************************************/
float alpha_x_min= 0.0f, alpha_y_min= 0.0f, alpha_z_min= 0.0f;
float alpha_x_max= 0.0f, alpha_y_max= 0.0f, alpha_z_max= 0.0f;
float alpha_min= 0.0f, alpha_max= 0.0f;
int i_min=0, j_min=0, k_min=0;
int i_max=0, j_max=0, k_max=0;
int i=0, j=0, k=0;
int voxel_i=0, voxel_j=0, voxel_k=0;
float alpha_x=0.0f, alpha_y=0.0f, alpha_z=0.0f;
float one_ray_sum = 0.0f;
float one_ray_length = 0.0f;
float alpha_c= 0.0f;
float d_x1_x2= 0.0f;
int N_total_sec=0;
int next_alpha_index;
/**** Step 1 :find out alpha_min, alpha_max ********/
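// The ray is parameterized as p(alpha) = X1 + alpha*(X2 - X1): alpha = 0 at the detector element (X1),
// alpha = 1 at the source (X2). Each alpha_* below is the parameter value at which the ray crosses
// a bounding x/y/z plane of the reconstruction volume.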
if ( (vertex_x1_x == vertex_x2_x) || (vertex_x1_y == vertex_x2_y) ) //Note: You may rotate the angle to avoid this happening
{
d_proj_correction[proj_pixel_index] = 0.0f ;
// printf("Vertical or Horizontal line occurs! Detector_x_idx:%d, Detector_z_idx:%d/n", Detector_x_idx,Detector_z_idx);
// assert(0);
}
else // if ( (vertex_x1_x != vertex_x2_x) && (vertex_x1_y != vertex_x2_y) )
{
alpha_min = (boundary_voxel_x + volumn_x*0 - vertex_x1_x )* inv_x_diff; //(9)
alpha_max = (boundary_voxel_x + volumn_x*M - vertex_x1_x )* inv_x_diff;
// Notice: it is still undetermined at this point which of the two is the parametric value of the first intersection of the ray with an x-plane
// It depends on whether source or detector lies on the left side of the reconstruction region at this time
alpha_x_min = fmin(alpha_min, alpha_max); //(5)
alpha_x_max = fmax(alpha_min, alpha_max ); //(6)
alpha_min = (boundary_voxel_y + volumn_y*0 - vertex_x1_y )* inv_y_diff;
alpha_max = (boundary_voxel_y + volumn_y*N - vertex_x1_y )* inv_y_diff;
alpha_y_min = fmin(alpha_min, alpha_max); //(7)
alpha_y_max = fmax(alpha_min, alpha_max ); //(8)
if (fabs(vertex_x2_z - vertex_x1_z) < volumn_z*1e-6) // in case x1 and x2 are at the same z position
{
alpha_min = -MAX_infi;
alpha_max = MAX_infi;
// printf("Same horizontal plane occurs! Detector_z_idx:%d/n", Detector_z_idx);
// assert(0);
}
else
{
alpha_min = (boundary_voxel_z + volumn_z*0 - vertex_x1_z )* inv_z_diff;
alpha_max = (boundary_voxel_z + volumn_z*ZETA - vertex_x1_z )* inv_z_diff;
}
alpha_z_min = fmin(alpha_min, alpha_max);
alpha_z_max = fmax(alpha_min, alpha_max );
// alpha_min / alpha_max reused
alpha_min = fmax(fmax(alpha_x_min, alpha_y_min), fmax(alpha_y_min, alpha_z_min)); //(3)
// i.e. alpha_min = fmax(alpha_x_min,alpha_y_min,alpha_z_min)
// it indicates the point where the path interacts with the near boundary of reconstruction region
alpha_max = fmin(fmin(alpha_x_max, alpha_y_max), fmin(alpha_y_max, alpha_z_max)); //(4)
// i.e. alpha_max = fmin(alpha_x_max,alpha_y_max,alpha_z_max)
// it indicates the point where the path last interacts with the far boundary of reconstruction region
/********Step 2,3: Find i_max, i_min***************/
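// i_min..i_max (and the j/k counterparts) are the first and last plane indices that the ray crosses
// strictly inside the volume; if alpha_max <= alpha_min the ray never intersects the reconstruction region.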
if (alpha_max <= alpha_min) // It means no interaction of the ray and the volume
d_proj_correction[proj_pixel_index] = 0.0f ;
else
{
// X direction
if (vertex_x1_x < vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_min = 1; //(11)
else //if (alpha_min != alpha_x_min)
i_min = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(12)
/* Note: i_min is the index of the 1st x plane where the path interacts inside the reconstruction region
* It is not the index of alpha_x_min
*/
if (alpha_max == alpha_x_max)
i_max = M; //(13)
else //if (alpha_max != alpha_x_max)
i_max = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(14)
// Note: i_max is the index of the last x plane where the path interacts with the reconstruction region (inside or boundary)
}
else //if (vertex_x1_x > vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_max = M-1; //(15)
else //if (alpha_min != alpha_x_min)
i_max = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(16)
if (alpha_max == alpha_x_max)
i_min = 0; //(17)
else //if (alpha_max != alpha_x_max)
i_min = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(18)
}
// Note: overall, i_min is the most left x-plane, i_max the most right x-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Y direction
if (vertex_x1_y < vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_min = 1;
else //f (alpha_min != alpha_y_min)
j_min = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) + 1 ;
if (alpha_max == alpha_y_max)
j_max = N;
else //if (alpha_max != alpha_y_max)
j_max = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) ;
}
else //if (vertex_x1_y > vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_max = N-1;
else //if (alpha_min != alpha_y_min)
j_max = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) ;
if (alpha_max == alpha_y_max)
j_min = 0;
else //if (alpha_max != alpha_y_max)
j_min = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) + 1 ;
}
// Note: overall, j_min is the most bottom y-plane, j_max the most top y-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Z direction
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6 )
{
k_min = floor(( vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
k_max = floor(( vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else if (vertex_x1_z < vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_min = 1;
else //if (alpha_min != alpha_z_min)
k_min = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
if (alpha_max == alpha_z_max)
k_max = ZETA;
else //if (alpha_max != alpha_z_max)
k_max = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else //if (vertex_x1_z > vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_max = ZETA-1;
else //if (alpha_min != alpha_z_min)
k_max = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
if (alpha_max == alpha_z_max)
k_min = 0;
else //if (alpha_max != alpha_z_max)
k_min = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z -boundary_voxel_z )*inv_volumn_z) + 1 ;
}
/************ initialization (i,j,k) (alpha_x_1,alpha_y_1,alpha_z_1)**************************/
// Note: (i,j,k) is the current x,y,z plane index (@ the initial point at the boundary)
// Note: (alpha_x,alpha_y,alpha_z) is the next x,y,z plane to go.
N_total_sec = i_max - i_min + 1 + j_max - j_min +1 + k_max-k_min +1;
// i.e. N_p (25)
if (vertex_x1_x < vertex_x2_x)
{
alpha_x = (volumn_x * i_min + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_min - 1;
}
else if (vertex_x1_x > vertex_x2_x)
{
alpha_x = (volumn_x * i_max + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_max + 1;
}
// Note: alpha_x_1 is the intersection where the path hit the 1st x plane inside the recon region
if (vertex_x1_y < vertex_x2_y)
{
alpha_y = (volumn_y * j_min + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_min - 1;
}
else if (vertex_x1_y > vertex_x2_y)
{
alpha_y = (volumn_y * j_max + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_max + 1;
}
// Note: alpha_y_1 is the intersection where the path hit the 1st y plane inside the recon region
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6 )
{
alpha_z = MAX_infi;
k = k_min-1;
}
else if (vertex_x1_z < vertex_x2_z)
{
alpha_z = (volumn_z * k_min + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_min - 1;
}
else if (vertex_x1_z > vertex_x2_z)
{
alpha_z = (volumn_z * k_max + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_max + 1;
}
/************ initialization (voxel_i,voxel_j,voxel_k) **************************/
// Note: (voxel_i,voxel_j,voxel_k) is the current x,y,z voxel index (@ the initial point at the boundary)
if (vertex_x1_x < vertex_x2_x)
voxel_i = i_min-1;
else
voxel_i = i_max;
if (vertex_x1_y < vertex_x2_y)
voxel_j = j_min-1;
else
voxel_j = j_max;
if (fabs(vertex_x1_z-vertex_x2_z) < volumn_z*1e-6)
voxel_k = k_min-1;
else if (vertex_x1_z < vertex_x2_z)
voxel_k = k_min-1;
else
voxel_k = k_max;
/***************** Updating alpha_x, alpha_y, alpha_z, ************************/
// Note: (alpha_x, alpha_y, alpha_z) the intersection where the path hit the next (i.e. 1st here ) x/y/z plane inside the recon
d_x1_x2 = sqrt((vertex_x2_x-vertex_x1_x)*(vertex_x2_x-vertex_x1_x) + (vertex_x2_y-vertex_x1_y)*(vertex_x2_y - vertex_x1_y) + (vertex_x2_z-vertex_x1_z)*(vertex_x2_z-vertex_x1_z) );
alpha_c = alpha_min; // intersection where the path hit the 1st plane at the boundary of recon region
// Note : (i,j,k) is the (x,y,z) plane index of the current intersection (with a certain plane)
// If a computed plane index i, j or k does not land on an integer, its predecessor along the ray is used
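// Each iteration advances to the nearest pending plane crossing (the smallest of alpha_x, alpha_y,
// alpha_z), accumulates the segment length and the length-weighted voxel value, and then computes
// that axis's next crossing; the later branches handle exact ties between two or three axes.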
while (alpha_max - alpha_c > 1e-16)
{
if ((voxel_i > M-1)||(voxel_i <0) || (voxel_j > N-1)||(voxel_j <0) || (voxel_k > ZETA-1)||(voxel_k <0))
{
alpha_c = alpha_max +1; // to terminate the loop
}
else
{
if ( (alpha_x < alpha_y) && (alpha_x < alpha_z))
// alpha_x is the nearest, so update alpha_x
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
//(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 1;
if (vertex_x1_x < vertex_x2_x)
{
i++;
voxel_i++;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i--; //(29)
voxel_i--;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
}
else if ( (alpha_y < alpha_x) && (alpha_y < alpha_z) )
// alpha_y is the nearest, so update alpha_y
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -1;
if (vertex_x1_y < vertex_x2_y)
{
j++;
voxel_j++;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j--;
voxel_j--;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_z < alpha_x) && (alpha_z < alpha_y) )
// alpha_z is the nearest, so update alpha_z
{
one_ray_length += d_x1_x2 * (alpha_z - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_z - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_z;
N_total_sec = N_total_sec -1;
if (vertex_x1_z < vertex_x2_z)
{
k++;
voxel_k++;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k--;
voxel_k--;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_y) && (alpha_x < alpha_z) )
//x = y < z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x < alpha_y))// && (sphere_range<=1.0f) )
// x = z < y;
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_y == alpha_z) && (alpha_y < alpha_x))// && (sphere_range<=1.0f) )
// y = z < x
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -2;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x == alpha_y))// && (sphere_range<=1.0f) )
// x=y=z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 3;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
}
}// end while
if (one_ray_length < volumn_z*1e-6)
d_proj_correction[proj_pixel_index] = 0.0;
else
{
if (command == 0)
d_proj_correction[proj_pixel_index] = one_ray_sum; // forward operator
else if (command == 1)
d_proj_correction[proj_pixel_index] = (d_proj_data[proj_pixel_index] - one_ray_sum)/one_ray_length;
// projection correction (for SART)
}
}//else if
}//else if
// __syncthreads();
}
__global__ void forward_ray_driven_3d_kernel_correction_separate(float *d_f , float *d_proj_sumLen, float *d_proj_weightedLen, float sin_theta, float cos_theta)
{
// d_f: 3D object array; d_f[i,j,k] = d_f [k*M*N+j*M+i];
// d_proj_data: 2D projection acquired at the angle of t_theta
// d_proj_sumLen: 2D projection correction, (output of this function. i.e. c(i) in the paper)
int Detector_x_idx = threadIdx.x + blockDim.x * blockIdx.x;
int Detector_z_idx = blockIdx.y;
int proj_pixel_index = Detector_z_idx * R + Detector_x_idx;
// Source position (X2): coordinate in (x,y,z) system .
float vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
float vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
float vertex_x2_z;
if (FBCT)
vertex_x2_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x; // FBCT geometry
else
vertex_x2_z = Source_z; // CBCT geometry
// Detector element center positions (X1): Coordinate in (x,y,z) system ---
float vertex_x1_x = DOD * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
float vertex_x1_y = DOD * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
float vertex_x1_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
// Notice: in this system, vertex_x1_x < 0 < vertex_x2_x
float inv_x_diff = 1.0f / (vertex_x2_x - vertex_x1_x);
float inv_y_diff = 1.0f / (vertex_x2_y - vertex_x1_y);
float inv_z_diff = 1.0f / (vertex_x2_z - vertex_x1_z);
/*****************************************/
float alpha_x_min= 0.0f, alpha_y_min= 0.0f, alpha_z_min= 0.0f;
float alpha_x_max= 0.0f, alpha_y_max= 0.0f, alpha_z_max= 0.0f;
float alpha_min= 0.0f, alpha_max= 0.0f;
int i_min=0, j_min=0, k_min=0;
int i_max=0, j_max=0, k_max=0;
int i=0, j=0, k=0;
int voxel_i=0, voxel_j=0, voxel_k=0;
float alpha_x=0.0f, alpha_y=0.0f, alpha_z=0.0f;
float one_ray_sum = 0.0f;
float one_ray_length = 0.0f;
float alpha_c= 0.0f;
float d_x1_x2= 0.0f;
int N_total_sec=0;
int next_alpha_index;
/**** Step 1 :find out alpha_min, alpha_max ********/
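// The ray is parameterized as p(alpha) = X1 + alpha*(X2 - X1): alpha = 0 at the detector element (X1),
// alpha = 1 at the source (X2). Each alpha_* below is the parameter value at which the ray crosses
// a bounding x/y/z plane of the reconstruction volume.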
if ( (vertex_x1_x == vertex_x2_x) || (vertex_x1_y == vertex_x2_y) ) //Note: You may rotate the angle to avoid this happening
{
d_proj_weightedLen[proj_pixel_index] = 0.0f ;
// printf("Vertical or Horizontal line occurs! Detector_x_idx:%d, Detector_z_idx:%d/n", Detector_x_idx,Detector_z_idx);
// assert(0);
}
else // if ( (vertex_x1_x != vertex_x2_x) && (vertex_x1_y != vertex_x2_y) )
{
alpha_min = (boundary_voxel_x + volumn_x*0 - vertex_x1_x )* inv_x_diff; //(9)
alpha_max = (boundary_voxel_x + volumn_x*M - vertex_x1_x )* inv_x_diff;
// Notice: it is still undetermined at this point which of the two is the parametric value of the first intersection of the ray with an x-plane
// It depends on whether source or detector lies on the left side of the reconstruction region at this time
alpha_x_min = fmin(alpha_min, alpha_max); //(5)
alpha_x_max = fmax(alpha_min, alpha_max ); //(6)
alpha_min = (boundary_voxel_y + volumn_y*0 - vertex_x1_y )* inv_y_diff;
alpha_max = (boundary_voxel_y + volumn_y*N - vertex_x1_y )* inv_y_diff;
alpha_y_min = fmin(alpha_min, alpha_max); //(7)
alpha_y_max = fmax(alpha_min, alpha_max ); //(8)
if (fabs(vertex_x2_z - vertex_x1_z) < volumn_z*1e-6) // in case x1 and x2 are at the same z position
{
alpha_min = -MAX_infi;
alpha_max = MAX_infi;
// printf("Same horizontal plane occurs! Detector_z_idx:%d/n", Detector_z_idx);
// assert(0);
}
else
{
alpha_min = (boundary_voxel_z + volumn_z*0 - vertex_x1_z )* inv_z_diff;
alpha_max = (boundary_voxel_z + volumn_z*ZETA - vertex_x1_z )* inv_z_diff;
}
alpha_z_min = fmin(alpha_min, alpha_max);
alpha_z_max = fmax(alpha_min, alpha_max );
// alpha_min / alpha_max reused
alpha_min = fmax(fmax(alpha_x_min, alpha_y_min), fmax(alpha_y_min, alpha_z_min)); //(3)
// i.e. alpha_min = fmax(alpha_x_min,alpha_y_min,alpha_z_min)
// it indicates the point where the path interacts with the near boundary of reconstruction region
alpha_max = fmin(fmin(alpha_x_max, alpha_y_max), fmin(alpha_y_max, alpha_z_max)); //(4)
// i.e. alpha_max = fmin(alpha_x_max,alpha_y_max,alpha_z_max)
// it indicates the point where the path last interacts with the far boundary of reconstruction region
/********Step 2,3: Find i_max, i_min***************/
if (alpha_max <= alpha_min) // It means no interaction of the ray and the volume
d_proj_weightedLen[proj_pixel_index] = 0.0f ;
else
{
// X direction
if (vertex_x1_x < vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_min = 1; //(11)
else //if (alpha_min != alpha_x_min)
i_min = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(12)
/* Note: i_min is the index of the 1st x plane where the path interacts inside the reconstruction region
* It is not the index of alpha_x_min
*/
if (alpha_max == alpha_x_max)
i_max = M; //(13)
else //if (alpha_max != alpha_x_max)
i_max = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(14)
// Note: i_max is the index of the last x plane where the path interacts with the reconstruction region (inside or boundary)
}
else //if (vertex_x1_x > vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_max = M-1; //(15)
else //if (alpha_min != alpha_x_min)
i_max = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(16)
if (alpha_max == alpha_x_max)
i_min = 0; //(17)
else //if (alpha_max != alpha_x_max)
i_min = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(18)
}
// Note: overall, i_min is the most left x-plane, i_max the most right x-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Y direction
if (vertex_x1_y < vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_min = 1;
else //f (alpha_min != alpha_y_min)
j_min = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) + 1 ;
if (alpha_max == alpha_y_max)
j_max = N;
else //if (alpha_max != alpha_y_max)
j_max = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) ;
}
else //if (vertex_x1_y > vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_max = N-1;
else //if (alpha_min != alpha_y_min)
j_max = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) ;
if (alpha_max == alpha_y_max)
j_min = 0;
else //if (alpha_max != alpha_y_max)
j_min = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) + 1 ;
}
// Note: overall, j_min is the most bottom y-plane, j_max the most top y-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Z direction
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6 )
{
k_min = floor(( vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
k_max = floor(( vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else if (vertex_x1_z < vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_min = 1;
else //if (alpha_min != alpha_z_min)
k_min = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
if (alpha_max == alpha_z_max)
k_max = ZETA;
else //if (alpha_max != alpha_z_max)
k_max = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else //if (vertex_x1_z > vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_max = ZETA-1;
else //if (alpha_min != alpha_z_min)
k_max = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
if (alpha_max == alpha_z_max)
k_min = 0;
else //if (alpha_max != alpha_z_max)
k_min = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z -boundary_voxel_z )*inv_volumn_z) + 1 ;
}
/************ initialization (i,j,k) (alpha_x_1,alpha_y_1,alpha_z_1)**************************/
// Note: (i,j,k) is the current x,y,z plane index (@ the initial point at the boundary)
// Note: (alpha_x,alpha_y,alpha_z) is the next x,y,z plane to go.
N_total_sec = i_max - i_min + 1 + j_max - j_min +1 + k_max-k_min +1;
// i.e. N_p (25)
if (vertex_x1_x < vertex_x2_x)
{
alpha_x = (volumn_x * i_min + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_min - 1;
}
else if (vertex_x1_x > vertex_x2_x)
{
alpha_x = (volumn_x * i_max + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_max + 1;
}
// Note: alpha_x_1 is the intersection where the path hit the 1st x plane inside the recon region
if (vertex_x1_y < vertex_x2_y)
{
alpha_y = (volumn_y * j_min + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_min - 1;
}
else if (vertex_x1_y > vertex_x2_y)
{
alpha_y = (volumn_y * j_max + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_max + 1;
}
// Note: alpha_y_1 is the intersection where the path hit the 1st y plane inside the recon region
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6 )
{
alpha_z = MAX_infi;
k = k_min-1;
}
else if (vertex_x1_z < vertex_x2_z)
{
alpha_z = (volumn_z * k_min + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_min - 1;
}
else if (vertex_x1_z > vertex_x2_z)
{
alpha_z = (volumn_z * k_max + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_max + 1;
}
/************ initialization (voxel_i,voxel_j,voxel_k) **************************/
// Note: (voxel_i,voxel_j,voxel_k) is the current x,y,z voxel index (@ the initial point at the boundary)
if (vertex_x1_x < vertex_x2_x)
voxel_i = i_min-1;
else
voxel_i = i_max;
if (vertex_x1_y < vertex_x2_y)
voxel_j = j_min-1;
else
voxel_j = j_max;
if (fabs(vertex_x1_z-vertex_x2_z) < volumn_z*1e-6)
voxel_k = k_min-1;
else if (vertex_x1_z < vertex_x2_z)
voxel_k = k_min-1;
else
voxel_k = k_max;
/***************** Updating alpha_x, alpha_y, alpha_z, ************************/
// Note: (alpha_x, alpha_y, alpha_z) the intersection where the path hit the next (i.e. 1st here ) x/y/z plane inside the recon
d_x1_x2 = sqrt((vertex_x2_x-vertex_x1_x)*(vertex_x2_x-vertex_x1_x) + (vertex_x2_y-vertex_x1_y)*(vertex_x2_y - vertex_x1_y) + (vertex_x2_z-vertex_x1_z)*(vertex_x2_z-vertex_x1_z) );
alpha_c = alpha_min; // intersection where the path hit the 1st plane at the boundary of recon region
// Note : (i,j,k) is the (x,y,z) plane index of the current intersection (with a certain plane)
// If a computed plane index i, j or k does not land on an integer, its predecessor along the ray is used
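// Each iteration advances to the nearest pending plane crossing (the smallest of alpha_x, alpha_y,
// alpha_z), accumulates the segment length and the length-weighted voxel value, and then computes
// that axis's next crossing; the later branches handle exact ties between two or three axes.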
while (alpha_max - alpha_c > 1e-16)
{
if ((voxel_i > M-1)||(voxel_i <0) || (voxel_j > N-1)||(voxel_j <0) || (voxel_k > ZETA-1)||(voxel_k <0))
{
alpha_c = alpha_max +1; // to terminate the loop
}
else
{
if ( (alpha_x < alpha_y) && (alpha_x < alpha_z))
// alpha_x is the nearest, so update alpha_x
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
//(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 1;
if (vertex_x1_x < vertex_x2_x)
{
i++;
voxel_i++;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i--; //(29)
voxel_i--;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
}
else if ( (alpha_y < alpha_x) && (alpha_y < alpha_z) )
// alpha_y is the nearest, so update alpha_y
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -1;
if (vertex_x1_y < vertex_x2_y)
{
j++;
voxel_j++;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j--;
voxel_j--;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_z < alpha_x) && (alpha_z < alpha_y) )
// alpha_z is the nearest, so update alpha_z
{
one_ray_length += d_x1_x2 * (alpha_z - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_z - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_z;
N_total_sec = N_total_sec -1;
if (vertex_x1_z < vertex_x2_z)
{
k++;
voxel_k++;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k--;
voxel_k--;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_y) && (alpha_x < alpha_z) )
//x = y < z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x < alpha_y))// && (sphere_range<=1.0f) )
// x = z < y;
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_y == alpha_z) && (alpha_y < alpha_x))// && (sphere_range<=1.0f) )
// y = z < x
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -2;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x == alpha_y))// && (sphere_range<=1.0f) )
// x=y=z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 3;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
}
}// end while
d_proj_sumLen[proj_pixel_index] = one_ray_length;
d_proj_weightedLen[proj_pixel_index] = one_ray_sum;
}//else if
}//else if
// __syncthreads();
}
__global__ void backprj_ray_driven_3d_kernel_multiGPU(float *d_volumn_kernel, float *d_proj_correction, float beta_temp, float sin_theta, float cos_theta, int subVolIdx, int command)
{
/*
* Reference: "Accelerating simultaneous algebraic reconstruction technique with motion compensation using CUDA-enabled GPU"
* Wai-Man Pang, CUHK
* Section: Back-projection and image update
* d_proj_correction : 2D projection correction, i.e. c(i) in the Wai-Man Pang, CUHK paper
* t_theta : projection angle
* beta_temp : lambda in the paper
* d_volumn: 3D object array
* d_volumn(j) = d_volumn(j) + beta_temp * sum_i (c(i)*w(ij)) / sum_i (w(ij)); where i is ray index, j is voxel index
*/
int Idx_voxel_x = threadIdx.x + blockIdx.x * blockDim.x;
int Idx_voxel_y = blockIdx.y;
int Idx_voxel_z = blockIdx.z;
int image_voxel_index = M * N * Idx_voxel_z + M * Idx_voxel_y + Idx_voxel_x;
//coordinate of center of each voxel in x-y-z system
float coord_voxel_x = boundary_voxel_x + volumn_x*0.5f + Idx_voxel_x * volumn_x;
float coord_voxel_y = boundary_voxel_y + volumn_y*0.5f + Idx_voxel_y * volumn_y;
float coord_voxel_z = boundary_voxel_z + volumn_z*(ZETA/Number_of_Devices*subVolIdx+0.5f) + Idx_voxel_z * volumn_z;
/**************************************/
float coord_vertex_x=0.0f, coord_vertex_y=0.0f, coord_vertex_z=0.0f;
float coord_vertex_s=0.0f, coord_vertex_t=0.0f;
float coord_vertexOnDetector_x=0.0f, coord_vertexOnDetector_z=0.0f;
float minY = MAX_infi, minZ=MAX_infi, maxY=-MAX_infi, maxZ=-MAX_infi;
float coord_pixelOnDetector_x=0.0f, coord_pixelOnDetector_y=0.0f, coord_pixelOnDetector_z=0.0f;
float coord_source_x=0.0f, coord_source_y=0.0f, coord_source_z=0.0f;
float alpha_x_i_1=0.0f, alpha_x_i=0.0f;
float alpha_y_i_1=0.0f, alpha_y_i=0.0f;
float alpha_z_i_1=0.0f, alpha_z_i=0.0f;
float alpha_x_temp=0.0f, alpha_y_temp=0.0f, alpha_z_temp=0.0f;
float alpha_min=0.0f, alpha_max=0.0f;
int minY_index=0, maxY_index=0, minZ_index=0, maxZ_index=0;
float sumWeight=0.0f, sumLength=0.0f;
float d_x1_x2=0.0f;
float inv_Detector_pixel = 1.0f/Detector_pixel_x;
int Error;
// float weight = 1.0f;
// float tao;
// float tao_m1 = atan( (float(R)*Detector_pixel_x/2.0f-abs(Offset)) / DSO);
/***********************************************************/
if ( (Idx_voxel_x-(float(M)*0.5f-0.5)-M_Offset)*volumn_x*(Idx_voxel_x-(float(M)*0.5f-0.5)-M_Offset)*volumn_x
+ (Idx_voxel_y-(float(N)*0.5f-0.5))*volumn_y*(Idx_voxel_y-(float(N)*0.5f-0.5))*volumn_y
>= (float(M)*0.5f-0.5)*volumn_x*(float(N)*0.5f-0.5)*volumn_y )
d_volumn_kernel[image_voxel_index] = 0.0f ;
else
// Note: The following code applies to all the voxels simultaneously
{
coord_source_x = Source_x * cos_theta - Source_y * sin_theta;
coord_source_y = Source_x * sin_theta + Source_y * cos_theta;
if (FBCT)
coord_source_z = coord_voxel_z; // FBCT geometry, multiple sources
else
coord_source_z = Source_z; // CBCT geometry, single source
// coordinate of the source in (x,y,z) system after normal gantry rotation
/******** investigate the eight vertices of each voxel ********/
for (int k=0;k<2;k++)
for (int j=0;j<2;j++)
for (int i=0;i<2;i++)
{
//coordinate for each of eight vertices of the voxel
coord_vertex_x = coord_voxel_x + (i)*volumn_x - 0.5f*volumn_x;
coord_vertex_y = coord_voxel_y + (j)*volumn_y - 0.5f*volumn_y;
coord_vertex_z = coord_voxel_z + (k)*volumn_z - 0.5f*volumn_z;
// <t-s> <----> <x,y>
coord_vertex_t = coord_vertex_x * cos_theta + coord_vertex_y * sin_theta;
coord_vertex_s = - coord_vertex_x * sin_theta + coord_vertex_y * cos_theta;
// Note: Now rotate the image volume (by -t_theta degrees) instead of the normal gantry rotation
// In the new coordinate system, the detector plane is unchanged and is perpendicular to the t axis
// in <t,s> system
coord_vertexOnDetector_x = (coord_vertex_t - DOD) / (DSO- coord_vertex_t) * (coord_vertex_s - Source_y) + coord_vertex_s ;
if (FBCT)
coord_vertexOnDetector_z = coord_voxel_z ; //FBCT geometry, no magnification along z axis
else
coord_vertexOnDetector_z = (coord_vertex_t - DOD) / (DSO- coord_vertex_t) * (coord_vertex_z - Source_z) + coord_vertex_z ; // CBCT geometry
// the projection of the vertex of the voxel
minY= fmin(minY, coord_vertexOnDetector_x);
maxY= fmax(maxY, coord_vertexOnDetector_x);
minZ= fmin(minZ, coord_vertexOnDetector_z);
maxZ= fmax(maxZ, coord_vertexOnDetector_z);
// form a minimum bounding rectangle (MBR) for these vertices
}
minY_index = floor( (minY - Detector_Ymin ) * inv_Detector_pixel +0.5f);
maxY_index = floor( (maxY - Detector_Ymin ) * inv_Detector_pixel +0.5f);
minZ_index = floor( (minZ - Detector_Zmin ) * inv_Detector_pixel +0.5f);
maxZ_index = floor( (maxZ - Detector_Zmin ) * inv_Detector_pixel +0.5f);
// index of pixels of MBR boundaries on the detector
/***********************************/
// If this voxel does not project on this detector plane, it means there is no ray passing through this voxel at this angle.
if ( (minY_index<0) && (maxY_index <0) )
{
d_volumn_kernel[image_voxel_index] += 0.0f ;
}
else if ( (minY_index>(R-1)) && (maxY_index >(R-1)) )
{
d_volumn_kernel[image_voxel_index] += 0.0f ;
}
else if ( (minZ_index<0) && (maxZ_index <0 ) )
{
d_volumn_kernel[image_voxel_index] += 0.0f ;
}
else if ( (minZ_index>(Z_prj-1)) && (maxZ_index >(Z_prj -1)) )
{
d_volumn_kernel[image_voxel_index] += 0.0f ;
}
else
// If this voxel projects on the detector plane
{
if (minY_index <=0)
minY_index = 0;
if (maxY_index >=(R-1) )
maxY_index = R-1;
if (minZ_index <=0)
minZ_index = 0;
if (maxZ_index >=(Z_prj-1) )
maxZ_index = Z_prj-1;
// for those projection pixels whose coordinates lie inside the MBR
// Each pixel corresponds to a ray, and that ray must pass through the specific voxel
for (int j=minZ_index; j<=maxZ_index; j++)
for (int i=minY_index; i<=maxY_index; i++)
{
coord_pixelOnDetector_x = DOD * cos_theta - (Detector_Ymin + i*Detector_pixel_x) * sin_theta ;
coord_pixelOnDetector_y = DOD * sin_theta + (Detector_Ymin + i*Detector_pixel_x) * cos_theta ;
coord_pixelOnDetector_z = Detector_Zmin + j*Detector_pixel_x;
// coordinate of the detector pixel inside MBR in (x,y,z) system after normal gantry rotation
/** Weighted Update for Half Detector **/
// if ( (float(i)*Detector_pixel_x) < 2.0f*abs(Offset) )
// weight = 1.0f;
// else
// {
// tao = atan( ( float(R/2-i)*Detector_pixel_x + abs(Offset) ) / DSO);
// weight = cos(PI/4*(tao/tao_m1 - 1));
// weight = weight * weight;
// }
/******/
// Next: investigate the line starting at x1 and ending at x2
// find out all the rays whose projection lies in the rectangle.
if ( (coord_source_x == coord_pixelOnDetector_x) || (coord_source_y == coord_pixelOnDetector_y) )
// Otherwise you should slightly rotate the angle to avoid these situations
{
Error=0;
// assert(Error);
sumWeight = 0.0f;
}
else // if ( (coord_source_x != coord_pixelOnDetector_x) && (coord_source_y != coord_pixelOnDetector_y) )
{
alpha_x_i_1 = ( (coord_voxel_x - 0.5f*volumn_x) - coord_pixelOnDetector_x )/( coord_source_x - coord_pixelOnDetector_x );
alpha_x_i = ( (coord_voxel_x + 0.5f*volumn_x) - coord_pixelOnDetector_x )/( coord_source_x - coord_pixelOnDetector_x );
alpha_y_i_1 = ( (coord_voxel_y - 0.5f*volumn_y) - coord_pixelOnDetector_y )/( coord_source_y - coord_pixelOnDetector_y );
alpha_y_i = ( (coord_voxel_y + 0.5f*volumn_y) - coord_pixelOnDetector_y )/( coord_source_y - coord_pixelOnDetector_y );
alpha_z_i_1 = ( (coord_voxel_z - 0.5f*volumn_z) - coord_pixelOnDetector_z )/( coord_source_z - coord_pixelOnDetector_z );
alpha_z_i = ( (coord_voxel_z + 0.5f*volumn_z) - coord_pixelOnDetector_z )/( coord_source_z - coord_pixelOnDetector_z );
// find out the indices of the two closest x planes around this specific voxel
alpha_x_temp = fmin((alpha_x_i_1), (alpha_x_i));
alpha_y_temp = fmin((alpha_y_i_1), (alpha_y_i));
if (fabs(coord_source_z - coord_pixelOnDetector_z) < volumn_z*1e-6)
alpha_z_temp = -MAX_infi;
else
alpha_z_temp = fmin((alpha_z_i_1), (alpha_z_i));
alpha_min = fmax(fmax(alpha_x_temp, alpha_y_temp), fmax(alpha_y_temp, alpha_z_temp));
// alpha_min is the enter point for one specific voxel
alpha_x_temp = fmax((alpha_x_i_1), (alpha_x_i));
alpha_y_temp = fmax((alpha_y_i_1), (alpha_y_i));
if (fabs(coord_source_z - coord_pixelOnDetector_z) < volumn_z*1e-6)
alpha_z_temp = MAX_infi;
else
alpha_z_temp = fmax((alpha_z_i_1), (alpha_z_i));
alpha_max = fmin(fmin(alpha_x_temp, alpha_y_temp), fmin(alpha_y_temp, alpha_z_temp));
// alpha_max is the exit point of the line passing through this voxel
if (alpha_max-alpha_min>0) // if the value is negative, it means the ray does not pass through this voxel
{
d_x1_x2 = sqrt((coord_source_x-coord_pixelOnDetector_x)*(coord_source_x-coord_pixelOnDetector_x) + (coord_source_y-coord_pixelOnDetector_y)*(coord_source_y - coord_pixelOnDetector_y) + (coord_source_z-coord_pixelOnDetector_z)*(coord_source_z-coord_pixelOnDetector_z) );
float temp = d_x1_x2*(alpha_max-alpha_min);
if ( temp > volumn_x*1e-6)
// the line passes through the voxel with a sufficient length;
{
sumWeight = sumWeight + temp*d_proj_correction[j*R + i];
// Note: d_proj_correction[j*R + i] is c(i) which has been previously calculated
// Note: d_x1_x2 * (alpha_max - alpha_min) is w(i) for ray i of this projection
sumLength = sumLength + temp;
}
}
}
}// end for loop: all the rays whose projection fits in the rectangle
if (sumLength < volumn_x*1e-6)
d_volumn_kernel[image_voxel_index] += 0.0f ;
else
{
if (command==0)
d_volumn_kernel[image_voxel_index] += beta_temp * sumWeight ; // matched adjoint operator, for test use
else if (command==1)
d_volumn_kernel[image_voxel_index] += beta_temp * sumWeight/sumLength ;
}
}//end else if this voxel projects on this detector plane
}//end else if the reconstruction region is in the circle
// __syncthreads();
}
| efecff5d19235f9e6980434efa6db9ddf955c8fb.cu | __global__ void kernel_add_proj(float *d_a, float *d_b)
{
int idx = blockDim.x * gridDim.x * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
d_a[idx]=d_a[idx]+d_b[idx];
}
__global__ void kernel_divide_proj(float *h_proj_single, float *h_proj_sumLen, float *h_proj_weightedLen)
{
int idx = blockDim.x * gridDim.x * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
float temp = h_proj_sumLen[idx];
if ( temp < volumn_z*1e-6)
h_proj_single[idx] = 0;
else
{
h_proj_single[idx] = (h_proj_single[idx] - h_proj_weightedLen[idx]) / temp *Number_of_Devices;
}
}
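// A minimal host-side sketch of how the two helper kernels above might be combined. The helper
// name, block width and buffer names are illustrative assumptions, not taken from this file:
// kernel_add_proj accumulates per-GPU partial length / weighted-length images, and
// kernel_divide_proj then turns the measured projection into the length-normalized correction c(i).
// The index expression in both kernels expects the grid to tile the projection buffer exactly;
// a full R x Z_prj detector with R a multiple of the block width is assumed here.
static void merge_partial_projections_sketch(float *d_proj_single, float *d_proj_sumLen,
                                             float *d_proj_weightedLen,
                                             float *d_partial_sumLen, float *d_partial_weightedLen)
{
    dim3 threads(128, 1);
    dim3 blocks(R / 128, Z_prj);
    // accumulate one more GPU's contribution into the running sums
    kernel_add_proj<<<blocks, threads>>>(d_proj_sumLen, d_partial_sumLen);
    kernel_add_proj<<<blocks, threads>>>(d_proj_weightedLen, d_partial_weightedLen);
    // (measured - reprojected) / ray length, scaled by Number_of_Devices inside the kernel
    kernel_divide_proj<<<blocks, threads>>>(d_proj_single, d_proj_sumLen, d_proj_weightedLen);
}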
__global__ void forward_ray_driven_3d_kernel_correction_multiGPU(float *d_f , float *d_proj_correction, float *d_proj_data, float sin_theta, float cos_theta, int subPrjIdx, int command)
{
// d_f: 3D object array; d_f[i,j,k] = d_f [k*M*N+j*M+i];
// d_proj_data: 2D projection acquired at the angle of t_theta
// d_proj_correction: 2D projection correction, (output of this function. i.e. c(i) in the paper)
int Detector_x_idx = threadIdx.x + blockDim.x * blockIdx.x;
int Detector_z_idx = blockIdx.y;
int proj_pixel_index = Detector_z_idx * R + Detector_x_idx;
// Source position (X2): coordinate in (x,y,z) system .
float vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
float vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
float vertex_x2_z;
if (FBCT)
vertex_x2_z = Detector_Zmin + (Z_prj/Number_of_Devices*subPrjIdx+Detector_z_idx) * Detector_pixel_x; // FBCT geometry
else
vertex_x2_z = Source_z; // CBCT geometry
// Detector element center positions (X1): Coordinate in (x,y,z) system ---
float vertex_x1_x = DOD * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
float vertex_x1_y = DOD * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
float vertex_x1_z = Detector_Zmin + (Z_prj/Number_of_Devices*subPrjIdx+Detector_z_idx) * Detector_pixel_x;
// Notice: in this system, vertex_x1_x < 0 < vertex_x2_x
float inv_x_diff = 1.0f / (vertex_x2_x - vertex_x1_x);
float inv_y_diff = 1.0f / (vertex_x2_y - vertex_x1_y);
float inv_z_diff = 1.0f / (vertex_x2_z - vertex_x1_z);
/*****************************************/
float alpha_x_min= 0.0f, alpha_y_min= 0.0f, alpha_z_min= 0.0f;
float alpha_x_max= 0.0f, alpha_y_max= 0.0f, alpha_z_max= 0.0f;
float alpha_min= 0.0f, alpha_max= 0.0f;
int i_min=0, j_min=0, k_min=0;
int i_max=0, j_max=0, k_max=0;
int i=0, j=0, k=0;
int voxel_i=0, voxel_j=0, voxel_k=0;
float alpha_x=0.0f, alpha_y=0.0f, alpha_z=0.0f;
float one_ray_sum = 0.0f;
float one_ray_length = 0.0f;
float alpha_c= 0.0f;
float d_x1_x2= 0.0f;
int N_total_sec=0;
int next_alpha_index;
/**** Step 1 :find out alpha_min, alpha_max ********/
if ( (vertex_x1_x == vertex_x2_x) || (vertex_x1_y == vertex_x2_y) ) //Note: You may rotate the angle to avoid this happening
{
d_proj_correction[proj_pixel_index] = 0.0f ;
// printf("Vertical or Horizontal line occurs! Detector_x_idx:%d, Detector_z_idx:%d/n", Detector_x_idx,Detector_z_idx);
// assert(0);
}
else // if ( (vertex_x1_x != vertex_x2_x) && (vertex_x1_y != vertex_x2_y) )
{
alpha_min = (boundary_voxel_x + volumn_x*0 - vertex_x1_x )* inv_x_diff; //(9)
alpha_max = (boundary_voxel_x + volumn_x*M - vertex_x1_x )* inv_x_diff;
// Note: it is not yet clear which of these is the parametric value of the first intersection point of the ray with the x-plane;
// it depends on whether the source or the detector lies on the left side of the reconstruction region at this angle
alpha_x_min = fmin(alpha_min, alpha_max); //(5)
alpha_x_max = fmax(alpha_min, alpha_max ); //(6)
alpha_min = (boundary_voxel_y + volumn_y*0 - vertex_x1_y )* inv_y_diff;
alpha_max = (boundary_voxel_y + volumn_y*N - vertex_x1_y )* inv_y_diff;
alpha_y_min = fmin(alpha_min, alpha_max); //(7)
alpha_y_max = fmax(alpha_min, alpha_max ); //(8)
if (fabs(vertex_x2_z - vertex_x1_z) < volumn_z*1e-6) // in case x1 and x2 are at the same z position
{
alpha_min = -MAX_infi;
alpha_max = MAX_infi;
// printf("Same horizontal plane occurs! Detector_z_idx:%d/n", Detector_z_idx);
// assert(0);
}
else
{
alpha_min = (boundary_voxel_z + volumn_z*0 - vertex_x1_z )* inv_z_diff;
alpha_max = (boundary_voxel_z + volumn_z*ZETA - vertex_x1_z )* inv_z_diff;
}
alpha_z_min = fmin(alpha_min, alpha_max);
alpha_z_max = fmax(alpha_min, alpha_max );
// alpha_min / alpha_max reused
alpha_min = fmax(fmax(alpha_x_min, alpha_y_min), fmax(alpha_y_min, alpha_z_min)); //(3)
// i.e. alpha_min = fmax(alpha_x_min,alpha_y_min,alpha_z_min)
// it indicates the point where the path interacts with the near boundary of reconstruction region
alpha_max = fmin(fmin(alpha_x_max, alpha_y_max), fmin(alpha_y_max, alpha_z_max)); //(4)
// i.e. alpha_max = fmin(alpha_x_max,alpha_y_max,alpha_z_max)
// it indicates the point where the path last interacts with the far boundary of reconstruction region
/********Step 2,3: Find i_max, i_min***************/
if (alpha_max <= alpha_min) // It means no interaction of the ray and the volume
d_proj_correction[proj_pixel_index] = 0.0f ;
else
{
// X direction
if (vertex_x1_x < vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_min = 1; //(11)
else //if (alpha_min != alpha_x_min)
i_min = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(12)
/* Note: i_min is the index of the 1st x plane where the path interacts inside the reconstruction region
* It is not the index of alpha_x_min
*/
if (alpha_max == alpha_x_max)
i_max = M; //(13)
else //if (alpha_max != alpha_x_max)
i_max = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(14)
// Note: i_max is the index of the last x plane where the path interacts with the reconstruction region (inside or boundary)
}
else //if (vertex_x1_x > vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_max = M-1; //(15)
else //if (alpha_min != alpha_x_min)
i_max = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(16)
if (alpha_max == alpha_x_max)
i_min = 0; //(17)
else //if (alpha_max != alpha_x_max)
i_min = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(18)
}
// Note: overall, i_min is the leftmost x-plane, i_max the rightmost x-plane,
// with the initial point (the first intersection on the boundary) NOT included.
//Y direction
if (vertex_x1_y < vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_min = 1;
else //f (alpha_min != alpha_y_min)
j_min = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) + 1 ;
if (alpha_max == alpha_y_max)
j_max = N;
else //if (alpha_max != alpha_y_max)
j_max = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) ;
}
else //if (vertex_x1_y > vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_max = N-1;
else //if (alpha_min != alpha_y_min)
j_max = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) ;
if (alpha_max == alpha_y_max)
j_min = 0;
else //if (alpha_max != alpha_y_max)
j_min = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) + 1 ;
}
// Note: overall, j_min is the bottommost y-plane, j_max the topmost y-plane,
// with the initial point (the first intersection on the boundary) NOT included.
//Z direction
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6 )
{
k_min = floor(( vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
k_max = floor(( vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else if (vertex_x1_z < vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_min = 1;
else //if (alpha_min != alpha_z_min)
k_min = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
if (alpha_max == alpha_z_max)
k_max = ZETA;
else //if (alpha_max != alpha_z_max)
k_max = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else //if (vertex_x1_z > vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_max = ZETA-1;
else //if (alpha_min != alpha_z_min)
k_max = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
if (alpha_max == alpha_z_max)
k_min = 0;
else //if (alpha_max != alpha_z_max)
k_min = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z -boundary_voxel_z )*inv_volumn_z) + 1 ;
}
/************ initialization (i,j,k) (alpha_x_1,alpha_y_1,alpha_z_1)**************************/
// Note: (i,j,k) is the current x,y,z plane index (@ the initial point at the boundary)
// Note: (alpha_x,alpha_y,alpha_z) is the next x,y,z plane to go.
N_total_sec = i_max - i_min + 1 + j_max - j_min +1 + k_max-k_min +1;
// i.e. N_p (25)
if (vertex_x1_x < vertex_x2_x)
{
alpha_x = (volumn_x * i_min + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_min - 1;
}
else if (vertex_x1_x > vertex_x2_x)
{
alpha_x = (volumn_x * i_max + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_max + 1;
}
// Note: alpha_x_1 is the intersection where the path hit the 1st x plane inside the recon region
if (vertex_x1_y < vertex_x2_y)
{
alpha_y = (volumn_y * j_min + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_min - 1;
}
else if (vertex_x1_y > vertex_x2_y)
{
alpha_y = (volumn_y * j_max + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_max + 1;
}
// Note: alpha_y_1 is the intersection where the path hit the 1st y plane inside the recon region
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6 )
{
alpha_z = MAX_infi;
k = k_min-1;
}
else if (vertex_x1_z < vertex_x2_z)
{
alpha_z = (volumn_z * k_min + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_min - 1;
}
else if (vertex_x1_z > vertex_x2_z)
{
alpha_z = (volumn_z * k_max + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_max + 1;
}
/************ initialization (voxel_i,voxel_j,voxel_k) **************************/
// Note: (voxel_i,voxel_j,voxel_k) is the current x,y,z voxel index (@ the initial point at the boundary)
if (vertex_x1_x < vertex_x2_x)
voxel_i = i_min-1;
else
voxel_i = i_max;
if (vertex_x1_y < vertex_x2_y)
voxel_j = j_min-1;
else
voxel_j = j_max;
if (fabs(vertex_x1_z-vertex_x2_z) < volumn_z*1e-6)
voxel_k = k_min-1;
else if (vertex_x1_z < vertex_x2_z)
voxel_k = k_min-1;
else
voxel_k = k_max;
/***************** Updating alpha_x, alpha_y, alpha_z, ************************/
// Note: (alpha_x, alpha_y, alpha_z) the intersection where the path hit the next (i.e. 1st here ) x/y/z plane inside the recon
d_x1_x2 = sqrt((vertex_x2_x-vertex_x1_x)*(vertex_x2_x-vertex_x1_x) + (vertex_x2_y-vertex_x1_y)*(vertex_x2_y - vertex_x1_y) + (vertex_x2_z-vertex_x1_z)*(vertex_x2_z-vertex_x1_z) );
alpha_c = alpha_min; // intersection where the path hit the 1st plane at the boundary of recon region
// Note : (i,j,k) is the (x,y,z) plane index of the current intersection (with a certain plane)
// If i, j or k would not be an integer, its predecessor (along the ray) is used
while (alpha_max - alpha_c > 1e-16)
{
if ((voxel_i > M-1)||(voxel_i <0) || (voxel_j > N-1)||(voxel_j <0) || (voxel_k > ZETA-1)||(voxel_k <0))
{
alpha_c = alpha_max +1; // to terminate the loop
}
else
{
if ( (alpha_x < alpha_y) && (alpha_x < alpha_z))
// alpha_x is the nearest, so update alpha_x
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
//(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 1;
if (vertex_x1_x < vertex_x2_x)
{
i++;
voxel_i++;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i--; //(29)
voxel_i--;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
}
else if ( (alpha_y < alpha_x) && (alpha_y < alpha_z) )
// alpha_y is the nearest, so update alpha_y
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -1;
if (vertex_x1_y < vertex_x2_y)
{
j++;
voxel_j++;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j--;
voxel_j--;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_z < alpha_x) && (alpha_z < alpha_y) )
// alpha_z is the nearest, so update alpha_z
{
one_ray_length += d_x1_x2 * (alpha_z - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_z - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_z;
N_total_sec = N_total_sec -1;
if (vertex_x1_z < vertex_x2_z)
{
k++;
voxel_k++;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k--;
voxel_k--;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_y) && (alpha_x < alpha_z) )
//x = y < z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x < alpha_y))// && (sphere_range<=1.0f) )
// x = z < y;
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_y == alpha_z) && (alpha_y < alpha_x))// && (sphere_range<=1.0f) )
// y = z < x
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -2;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x == alpha_y))// && (sphere_range<=1.0f) )
// x=y=z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 3;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
}
}// end while
if (one_ray_length < volumn_z*1e-6)
d_proj_correction[proj_pixel_index] = 0.0;
else
{
if (command == 0)
d_proj_correction[proj_pixel_index] = one_ray_sum; // forward operator
else if (command == 1)
d_proj_correction[proj_pixel_index] = (d_proj_data[proj_pixel_index] - one_ray_sum)/one_ray_length;
// projection correction (for SART)
}
}//else if
}//else if
// __syncthreads();
}
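// A minimal host-side launch sketch for the forward projector above. The helper name and the
// block width are illustrative assumptions, not taken from this file; the grid simply mirrors the
// indexing used inside the kernel (one thread per detector column element, one blockIdx.y per
// detector row of the Z_prj/Number_of_Devices stripe handled by this GPU), and R is assumed to be
// a multiple of 128.
static void launch_forward_projection_sketch(float *d_f, float *d_proj_correction, float *d_proj_data,
                                             float sin_theta, float cos_theta, int subPrjIdx, int command)
{
    dim3 threads(128, 1);
    dim3 blocks(R / 128, Z_prj / Number_of_Devices);
    forward_ray_driven_3d_kernel_correction_multiGPU<<<blocks, threads>>>(
        d_f, d_proj_correction, d_proj_data, sin_theta, cos_theta, subPrjIdx, command);
}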
__global__ void forward_ray_driven_3d_kernel_correction_separate(float *d_f , float *d_proj_sumLen, float *d_proj_weightedLen, float sin_theta, float cos_theta)
{
// d_f: 3D object array; d_f[i,j,k] = d_f [k*M*N+j*M+i];
// d_proj_data: 2D projection acquired at the angle of t_theta
// d_proj_sumLen: 2D projection correction, (output of this function. i.e. c(i) in the paper)
int Detector_x_idx = threadIdx.x + blockDim.x * blockIdx.x;
int Detector_z_idx = blockIdx.y;
int proj_pixel_index = Detector_z_idx * R + Detector_x_idx;
// Source position (X2): coordinate in (x,y,z) system .
float vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
float vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
float vertex_x2_z;
if (FBCT)
vertex_x2_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x; // FBCT geometry
else
vertex_x2_z = Source_z; // CBCT geometry
// Detector element center positions (X1): Coordinate in (x,y,z) system ---
float vertex_x1_x = DOD * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
float vertex_x1_y = DOD * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
float vertex_x1_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
// Notice: in this system, vertex_x1_x < 0 < vertex_x2_x
float inv_x_diff = 1.0f / (vertex_x2_x - vertex_x1_x);
float inv_y_diff = 1.0f / (vertex_x2_y - vertex_x1_y);
float inv_z_diff = 1.0f / (vertex_x2_z - vertex_x1_z);
/*****************************************/
float alpha_x_min= 0.0f, alpha_y_min= 0.0f, alpha_z_min= 0.0f;
float alpha_x_max= 0.0f, alpha_y_max= 0.0f, alpha_z_max= 0.0f;
float alpha_min= 0.0f, alpha_max= 0.0f;
int i_min=0, j_min=0, k_min=0;
int i_max=0, j_max=0, k_max=0;
int i=0, j=0, k=0;
int voxel_i=0, voxel_j=0, voxel_k=0;
float alpha_x=0.0f, alpha_y=0.0f, alpha_z=0.0f;
float one_ray_sum = 0.0f;
float one_ray_length = 0.0f;
float alpha_c= 0.0f;
float d_x1_x2= 0.0f;
int N_total_sec=0;
int next_alpha_index;
/**** Step 1 :find out alpha_min, alpha_max ********/
if ( (vertex_x1_x == vertex_x2_x) || (vertex_x1_y == vertex_x2_y) ) //Note: You may rotate the angle to avoid this happening
{
d_proj_weightedLen[proj_pixel_index] = 0.0f ;
// printf("Vertical or Horizontal line occurs! Detector_x_idx:%d, Detector_z_idx:%d/n", Detector_x_idx,Detector_z_idx);
// assert(0);
}
else // if ( (vertex_x1_x != vertex_x2_x) && (vertex_x1_y != vertex_x2_y) )
{
alpha_min = (boundary_voxel_x + volumn_x*0 - vertex_x1_x )* inv_x_diff; //(9)
alpha_max = (boundary_voxel_x + volumn_x*M - vertex_x1_x )* inv_x_diff;
// Note: it is not yet clear which of these is the parametric value of the first intersection point of the ray with the x-plane;
// it depends on whether the source or the detector lies on the left side of the reconstruction region at this angle
alpha_x_min = fmin(alpha_min, alpha_max); //(5)
alpha_x_max = fmax(alpha_min, alpha_max ); //(6)
alpha_min = (boundary_voxel_y + volumn_y*0 - vertex_x1_y )* inv_y_diff;
alpha_max = (boundary_voxel_y + volumn_y*N - vertex_x1_y )* inv_y_diff;
alpha_y_min = fmin(alpha_min, alpha_max); //(7)
alpha_y_max = fmax(alpha_min, alpha_max ); //(8)
if (fabs(vertex_x2_z - vertex_x1_z) < volumn_z*1e-6) // in case x1 and x2 are at the same z position
{
alpha_min = -MAX_infi;
alpha_max = MAX_infi;
// printf("Same horizontal plane occurs! Detector_z_idx:%d/n", Detector_z_idx);
// assert(0);
}
else
{
alpha_min = (boundary_voxel_z + volumn_z*0 - vertex_x1_z )* inv_z_diff;
alpha_max = (boundary_voxel_z + volumn_z*ZETA - vertex_x1_z )* inv_z_diff;
}
alpha_z_min = fmin(alpha_min, alpha_max);
alpha_z_max = fmax(alpha_min, alpha_max );
// alpha_min / alpha_max reused
alpha_min = fmax(fmax(alpha_x_min, alpha_y_min), fmax(alpha_y_min, alpha_z_min)); //(3)
// i.e. alpha_min = fmax(alpha_x_min,alpha_y_min,alpha_z_min)
// it indicates the point where the path interacts with the near boundary of reconstruction region
alpha_max = fmin(fmin(alpha_x_max, alpha_y_max), fmin(alpha_y_max, alpha_z_max)); //(4)
// i.e. alpha_max = fmin(alpha_x_max,alpha_y_max,alpha_z_max)
// it indicates the point where the path last interacts with the far boundary of reconstruction region
/********Step 2,3: Find i_max, i_min***************/
if (alpha_max <= alpha_min) // It means no interaction of the ray and the volume
d_proj_weightedLen[proj_pixel_index] = 0.0f ;
else
{
// X direction
if (vertex_x1_x < vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_min = 1; //(11)
else //if (alpha_min != alpha_x_min)
i_min = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(12)
/* Note: i_min is the index of the 1st x plane where the path interacts inside the reconstruction region
* It is not the index of alpha_x_min
*/
if (alpha_max == alpha_x_max)
i_max = M; //(13)
else //if (alpha_max != alpha_x_max)
i_max = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(14)
// Note: i_max is the index of the last x plane where the path interacts with the reconstruction region (inside or boundary)
}
else //if (vertex_x1_x > vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_max = M-1; //(15)
else //if (alpha_min != alpha_x_min)
i_max = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(16)
if (alpha_max == alpha_x_max)
i_min = 0; //(17)
else //if (alpha_max != alpha_x_max)
i_min = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(18)
}
// Note: overall, i_min is the leftmost x-plane, i_max the rightmost x-plane,
// with the initial point (the first intersection on the boundary) NOT included.
//Y direction
if (vertex_x1_y < vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_min = 1;
else //f (alpha_min != alpha_y_min)
j_min = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) + 1 ;
if (alpha_max == alpha_y_max)
j_max = N;
else //if (alpha_max != alpha_y_max)
j_max = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) ;
}
else //if (vertex_x1_y > vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_max = N-1;
else //if (alpha_min != alpha_y_min)
j_max = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) ;
if (alpha_max == alpha_y_max)
j_min = 0;
else //if (alpha_max != alpha_y_max)
j_min = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) + 1 ;
}
// Note: overall, j_min is the bottommost y-plane, j_max the topmost y-plane,
// with the initial point (the first intersection on the boundary) NOT included.
//Z direction
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6 )
{
k_min = floor(( vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
k_max = floor(( vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else if (vertex_x1_z < vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_min = 1;
else //if (alpha_min != alpha_z_min)
k_min = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
if (alpha_max == alpha_z_max)
k_max = ZETA;
else //if (alpha_max != alpha_z_max)
k_max = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else //if (vertex_x1_z > vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_max = ZETA-1;
else //if (alpha_min != alpha_z_min)
k_max = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
if (alpha_max == alpha_z_max)
k_min = 0;
else //if (alpha_max != alpha_z_max)
k_min = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z -boundary_voxel_z )*inv_volumn_z) + 1 ;
}
/************ initialization (i,j,k) (alpha_x_1,alpha_y_1,alpha_z_1)**************************/
// Note: (i,j,k) is the current x,y,z plane index (@ the initial point at the boundary)
// Note: (alpha_x,alpha_y,alpha_z) is the next x,y,z plane to go.
N_total_sec = i_max - i_min + 1 + j_max - j_min +1 + k_max-k_min +1;
// i.e. N_p (25)
if (vertex_x1_x < vertex_x2_x)
{
alpha_x = (volumn_x * i_min + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_min - 1;
}
else if (vertex_x1_x > vertex_x2_x)
{
alpha_x = (volumn_x * i_max + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_max + 1;
}
// Note: alpha_x_1 is the intersection where the path hit the 1st x plane inside the recon region
if (vertex_x1_y < vertex_x2_y)
{
alpha_y = (volumn_y * j_min + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_min - 1;
}
else if (vertex_x1_y > vertex_x2_y)
{
alpha_y = (volumn_y * j_max + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_max + 1;
}
// Note: alpha_y_1 is the intersection where the path hit the 1st y plane inside the recon region
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6 )
{
alpha_z = MAX_infi;
k = k_min-1;
}
else if (vertex_x1_z < vertex_x2_z)
{
alpha_z = (volumn_z * k_min + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_min - 1;
}
else if (vertex_x1_z > vertex_x2_z)
{
alpha_z = (volumn_z * k_max + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_max + 1;
}
/************ initialization (voxel_i,voxel_j,voxel_k) **************************/
// Note: (voxel_i,voxel_j,voxel_k) is the current x,y,z voxel index (@ the initial point at the boundary)
if (vertex_x1_x < vertex_x2_x)
voxel_i = i_min-1;
else
voxel_i = i_max;
if (vertex_x1_y < vertex_x2_y)
voxel_j = j_min-1;
else
voxel_j = j_max;
if (fabs(vertex_x1_z-vertex_x2_z) < volumn_z*1e-6)
voxel_k = k_min-1;
else if (vertex_x1_z < vertex_x2_z)
voxel_k = k_min-1;
else
voxel_k = k_max;
/***************** Updating alpha_x, alpha_y, alpha_z, ************************/
// Note: (alpha_x, alpha_y, alpha_z) the intersection where the path hit the next (i.e. 1st here ) x/y/z plane inside the recon
d_x1_x2 = sqrt((vertex_x2_x-vertex_x1_x)*(vertex_x2_x-vertex_x1_x) + (vertex_x2_y-vertex_x1_y)*(vertex_x2_y - vertex_x1_y) + (vertex_x2_z-vertex_x1_z)*(vertex_x2_z-vertex_x1_z) );
alpha_c = alpha_min; // intersection where the path hit the 1st plane at the boundary of recon region
// Note : (i,j,k) is the (x,y,z) plane index of the current intersection (with a certain plane)
// If i, j or k would not be an integer, its predecessor (along the ray) is used
while (alpha_max - alpha_c > 1e-16)
{
if ((voxel_i > M-1)||(voxel_i <0) || (voxel_j > N-1)||(voxel_j <0) || (voxel_k > ZETA-1)||(voxel_k <0))
{
alpha_c = alpha_max +1; // to terminate the loop
}
else
{
if ( (alpha_x < alpha_y) && (alpha_x < alpha_z))
// alpha_x is the nearest, so update alpha_x
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
//(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 1;
if (vertex_x1_x < vertex_x2_x)
{
i++;
voxel_i++;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i--; //(29)
voxel_i--;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
}
else if ( (alpha_y < alpha_x) && (alpha_y < alpha_z) )
// alpha_y is the nearest, so update alpha_y
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -1;
if (vertex_x1_y < vertex_x2_y)
{
j++;
voxel_j++;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j--;
voxel_j--;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_z < alpha_x) && (alpha_z < alpha_y) )
// alpha_z is the nearest, so update alpha_z
{
one_ray_length += d_x1_x2 * (alpha_z - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_z - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_z;
N_total_sec = N_total_sec -1;
if (vertex_x1_z < vertex_x2_z)
{
k++;
voxel_k++;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k--;
voxel_k--;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_y) && (alpha_x < alpha_z) )
//x = y < z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x < alpha_y))// && (sphere_range<=1.0f) )
// x = z < y;
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_y == alpha_z) && (alpha_y < alpha_x))// && (sphere_range<=1.0f) )
// y = z < x
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -2;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x == alpha_y))// && (sphere_range<=1.0f) )
// x=y=z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 3;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
}
}// end while
d_proj_sumLen[proj_pixel_index] = one_ray_length;
d_proj_weightedLen[proj_pixel_index] = one_ray_sum;
}//else if
}//else if
// __syncthreads();
}
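// Note on the two forward-projection variants above: forward_ray_driven_3d_kernel_correction_multiGPU
// traces only the detector stripe assigned to one GPU (its rows are offset by
// Z_prj/Number_of_Devices*subPrjIdx) and writes the finished correction c(i) directly, while
// forward_ray_driven_3d_kernel_correction_separate traces the full detector and returns the raw
// ray length and weighted sum per pixel, presumably so that kernel_add_proj / kernel_divide_proj
// can combine the contributions of several GPUs before normalizing.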
__global__ void backprj_ray_driven_3d_kernel_multiGPU(float *d_volumn_kernel, float *d_proj_correction, float beta_temp, float sin_theta, float cos_theta, int subVolIdx, int command)
{
/*
* Reference: "Accelerating simultaneous algebraic reconstruction technique with motion compensation using CUDA-enabled GPU"
* Wai-Man Pang, CUHK
* Section: Back-projection and image update
* d_proj_correction : 2D projection correction, i.e. c(i) in the Wai-Man Pang, CUHK paper
* t_theta : projection angle
* beta_temp : lambda in the paper
* d_volumn: 3D object array
* d_volumn(j) = d_volumn(j) + beta_temp * sum_i (c(i)*w(ij)) / sum_i (w(ij)); where i is ray index, j is voxel index
*/
int Idx_voxel_x = threadIdx.x + blockIdx.x * blockDim.x;
int Idx_voxel_y = blockIdx.y;
int Idx_voxel_z = blockIdx.z;
int image_voxel_index = M * N * Idx_voxel_z + M * Idx_voxel_y + Idx_voxel_x;
//coordinate of center of each voxel in x-y-z system
float coord_voxel_x = boundary_voxel_x + volumn_x*0.5f + Idx_voxel_x * volumn_x;
float coord_voxel_y = boundary_voxel_y + volumn_y*0.5f + Idx_voxel_y * volumn_y;
float coord_voxel_z = boundary_voxel_z + volumn_z*(ZETA/Number_of_Devices*subVolIdx+0.5f) + Idx_voxel_z * volumn_z;
/**************************************/
float coord_vertex_x=0.0f, coord_vertex_y=0.0f, coord_vertex_z=0.0f;
float coord_vertex_s=0.0f, coord_vertex_t=0.0f;
float coord_vertexOnDetector_x=0.0f, coord_vertexOnDetector_z=0.0f;
float minY = MAX_infi, minZ=MAX_infi, maxY=-MAX_infi, maxZ=-MAX_infi;
float coord_pixelOnDetector_x=0.0f, coord_pixelOnDetector_y=0.0f, coord_pixelOnDetector_z=0.0f;
float coord_source_x=0.0f, coord_source_y=0.0f, coord_source_z=0.0f;
float alpha_x_i_1=0.0f, alpha_x_i=0.0f;
float alpha_y_i_1=0.0f, alpha_y_i=0.0f;
float alpha_z_i_1=0.0f, alpha_z_i=0.0f;
float alpha_x_temp=0.0f, alpha_y_temp=0.0f, alpha_z_temp=0.0f;
float alpha_min=0.0f, alpha_max=0.0f;
int minY_index=0, maxY_index=0, minZ_index=0, maxZ_index=0;
float sumWeight=0.0f, sumLength=0.0f;
float d_x1_x2=0.0f;
float inv_Detector_pixel = 1.0f/Detector_pixel_x;
int Error;
// float weight = 1.0f;
// float tao;
// float tao_m1 = atan( (float(R)*Detector_pixel_x/2.0f-abs(Offset)) / DSO);
/***********************************************************/
if ( (Idx_voxel_x-(float(M)*0.5f-0.5)-M_Offset)*volumn_x*(Idx_voxel_x-(float(M)*0.5f-0.5)-M_Offset)*volumn_x
+ (Idx_voxel_y-(float(N)*0.5f-0.5))*volumn_y*(Idx_voxel_y-(float(N)*0.5f-0.5))*volumn_y
>= (float(M)*0.5f-0.5)*volumn_x*(float(N)*0.5f-0.5)*volumn_y )
d_volumn_kernel[image_voxel_index] = 0.0f ;
else
// Note: The following code applies to all the voxels simultaneously
{
coord_source_x = Source_x * cos_theta - Source_y * sin_theta;
coord_source_y = Source_x * sin_theta + Source_y * cos_theta;
if (FBCT)
coord_source_z = coord_voxel_z; // FBCT geometry, multiple sources
else
coord_source_z = Source_z; // CBCT geometry, single source
// coordinate of the source in (x,y,z) system after normal gantry rotation
/******** investigate the eight vertices of each voxel ********/
for (int k=0;k<2;k++)
for (int j=0;j<2;j++)
for (int i=0;i<2;i++)
{
//coordinate for each of eight vertices of the voxel
coord_vertex_x = coord_voxel_x + (i)*volumn_x - 0.5f*volumn_x;
coord_vertex_y = coord_voxel_y + (j)*volumn_y - 0.5f*volumn_y;
coord_vertex_z = coord_voxel_z + (k)*volumn_z - 0.5f*volumn_z;
// <t-s> <----> <x,y>
coord_vertex_t = coord_vertex_x * cos_theta + coord_vertex_y * sin_theta;
coord_vertex_s = - coord_vertex_x * sin_theta + coord_vertex_y * cos_theta;
// Note: Now rotate the image volume (by -t_theta degrees) instead of the normal gantry rotation
// In the new coordinate system, the detector plane is unchanged and is perpendicular to the t axis
// in <t,s> system
coord_vertexOnDetector_x = (coord_vertex_t - DOD) / (DSO- coord_vertex_t) * (coord_vertex_s - Source_y) + coord_vertex_s ;
if (FBCT)
coord_vertexOnDetector_z = coord_voxel_z ; //FBCT geometry, no magnification along z axis
else
coord_vertexOnDetector_z = (coord_vertex_t - DOD) / (DSO- coord_vertex_t) * (coord_vertex_z - Source_z) + coord_vertex_z ; // CBCT geometry
// the projection of the vertex of the voxel
minY= fmin(minY, coord_vertexOnDetector_x);
maxY= fmax(maxY, coord_vertexOnDetector_x);
minZ= fmin(minZ, coord_vertexOnDetector_z);
maxZ= fmax(maxZ, coord_vertexOnDetector_z);
// form a minimum bounding rectangle (MBR) for these vertices
}
minY_index = floor( (minY - Detector_Ymin ) * inv_Detector_pixel +0.5f);
maxY_index = floor( (maxY - Detector_Ymin ) * inv_Detector_pixel +0.5f);
minZ_index = floor( (minZ - Detector_Zmin ) * inv_Detector_pixel +0.5f);
maxZ_index = floor( (maxZ - Detector_Zmin ) * inv_Detector_pixel +0.5f);
// index of pixels of MBR boundaries on the detector
/***********************************/
// If this voxel does not project on this detector plane, it means there is no ray passing through this voxel at this angle.
if ( (minY_index<0) && (maxY_index <0) )
{
d_volumn_kernel[image_voxel_index] += 0.0f ;
}
else if ( (minY_index>(R-1)) && (maxY_index >(R-1)) )
{
d_volumn_kernel[image_voxel_index] += 0.0f ;
}
else if ( (minZ_index<0) && (maxZ_index <0 ) )
{
d_volumn_kernel[image_voxel_index] += 0.0f ;
}
else if ( (minZ_index>(Z_prj-1)) && (maxZ_index >(Z_prj -1)) )
{
d_volumn_kernel[image_voxel_index] += 0.0f ;
}
else
// If this voxel projects on the detector plane
{
if (minY_index <=0)
minY_index = 0;
if (maxY_index >=(R-1) )
maxY_index = R-1;
if (minZ_index <=0)
minZ_index = 0;
if (maxZ_index >=(Z_prj-1) )
maxZ_index = Z_prj-1;
// for those projection pixels whose coordinates lie inside the MBR
// Each pixel corresponds to a ray, and that ray must pass through the specific voxel
for (int j=minZ_index; j<=maxZ_index; j++)
for (int i=minY_index; i<=maxY_index; i++)
{
coord_pixelOnDetector_x = DOD * cos_theta - (Detector_Ymin + i*Detector_pixel_x) * sin_theta ;
coord_pixelOnDetector_y = DOD * sin_theta + (Detector_Ymin + i*Detector_pixel_x) * cos_theta ;
coord_pixelOnDetector_z = Detector_Zmin + j*Detector_pixel_x;
// coordinate of the detector pixel inside MBR in (x,y,z) system after normal gantry rotation
/** Weighted Update for Half Detector **/
// if ( (float(i)*Detector_pixel_x) < 2.0f*abs(Offset) )
// weight = 1.0f;
// else
// {
// tao = atan( ( float(R/2-i)*Detector_pixel_x + abs(Offset) ) / DSO);
// weight = cos(PI/4*(tao/tao_m1 - 1));
// weight = weight * weight;
// }
/******/
// Next: investigate the line starting at x1 and ending at x2
// find out all the rays whose projection lies in the rectangle.
if ( (coord_source_x == coord_pixelOnDetector_x) || (coord_source_y == coord_pixelOnDetector_y) )
// Otherwise you should slightly rotate the angle to avoid these situations
{
Error=0;
// assert(Error);
sumWeight = 0.0f;
}
else // if ( (coord_source_x != coord_pixelOnDetector_x) && (coord_source_y != coord_pixelOnDetector_y) )
{
alpha_x_i_1 = ( (coord_voxel_x - 0.5f*volumn_x) - coord_pixelOnDetector_x )/( coord_source_x - coord_pixelOnDetector_x );
alpha_x_i = ( (coord_voxel_x + 0.5f*volumn_x) - coord_pixelOnDetector_x )/( coord_source_x - coord_pixelOnDetector_x );
alpha_y_i_1 = ( (coord_voxel_y - 0.5f*volumn_y) - coord_pixelOnDetector_y )/( coord_source_y - coord_pixelOnDetector_y );
alpha_y_i = ( (coord_voxel_y + 0.5f*volumn_y) - coord_pixelOnDetector_y )/( coord_source_y - coord_pixelOnDetector_y );
alpha_z_i_1 = ( (coord_voxel_z - 0.5f*volumn_z) - coord_pixelOnDetector_z )/( coord_source_z - coord_pixelOnDetector_z );
alpha_z_i = ( (coord_voxel_z + 0.5f*volumn_z) - coord_pixelOnDetector_z )/( coord_source_z - coord_pixelOnDetector_z );
// find out the indices of the two closest x planes around this specific voxel
alpha_x_temp = fmin((alpha_x_i_1), (alpha_x_i));
alpha_y_temp = fmin((alpha_y_i_1), (alpha_y_i));
if (fabs(coord_source_z - coord_pixelOnDetector_z) < volumn_z*1e-6)
alpha_z_temp = -MAX_infi;
else
alpha_z_temp = fmin((alpha_z_i_1), (alpha_z_i));
alpha_min = fmax(fmax(alpha_x_temp, alpha_y_temp), fmax(alpha_y_temp, alpha_z_temp));
// alpha_min is the enter point for one specific voxel
alpha_x_temp = fmax((alpha_x_i_1), (alpha_x_i));
alpha_y_temp = fmax((alpha_y_i_1), (alpha_y_i));
if (fabs(coord_source_z - coord_pixelOnDetector_z) < volumn_z*1e-6)
alpha_z_temp = MAX_infi;
else
alpha_z_temp = fmax((alpha_z_i_1), (alpha_z_i));
alpha_max = fmin(fmin(alpha_x_temp, alpha_y_temp), fmin(alpha_y_temp, alpha_z_temp));
// alpha_max is the exit point of the line passing through this voxel
if (alpha_max-alpha_min>0) // if the value is negative, it means the ray does not pass through this voxel
{
d_x1_x2 = sqrt((coord_source_x-coord_pixelOnDetector_x)*(coord_source_x-coord_pixelOnDetector_x) + (coord_source_y-coord_pixelOnDetector_y)*(coord_source_y - coord_pixelOnDetector_y) + (coord_source_z-coord_pixelOnDetector_z)*(coord_source_z-coord_pixelOnDetector_z) );
float temp = d_x1_x2*(alpha_max-alpha_min);
if ( temp > volumn_x*1e-6)
// the line passes through the voxel with a sufficient length;
{
sumWeight = sumWeight + temp*d_proj_correction[j*R + i];
// Note: d_proj_correction[j*R + i] is c(i) which has been previously calculated
// Note: d_x1_x2 * (alpha_max - alpha_min) is w(i) for ray i of this projection
sumLength = sumLength + temp;
}
}
}
}// end for loop: all the rays whose projection fits in the rectangle
if (sumLength < volumn_x*1e-6)
d_volumn_kernel[image_voxel_index] += 0.0f ;
else
{
if (command==0)
d_volumn_kernel[image_voxel_index] += beta_temp * sumWeight ; // matched adjoint operator, for test use
else if (command==1)
d_volumn_kernel[image_voxel_index] += beta_temp * sumWeight/sumLength ;
}
}//end else if this voxel projects on this detector plane
}//end else if the reconstruction region is in the circle
// __syncthreads();
}
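// Descriptive note on the update above: for every detector pixel whose ray reaches this voxel,
// alpha_min/alpha_max come from the standard slab (ray-box) test of the pixel-to-source line
// against the voxel's axis-aligned faces, so w = d(x1,x2) * (alpha_max - alpha_min) is the chord
// length of that ray inside the voxel and c = d_proj_correction[j*R + i] is its correction value.
// The voxel then accumulates beta_temp * sum(w*c) for command==0 (the matched adjoint, per the
// comment above) or beta_temp * sum(w*c) / sum(w) for command==1 (length-normalized update).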
|
2a7e9d9aaa707eaa31b9a136de41f814e1e2b47e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/sort.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
template<typename ExecutionPolicy, typename Iterator, typename Compare, typename Iterator2>
__global__
void sort_kernel(ExecutionPolicy exec, Iterator first, Iterator last, Compare comp, Iterator2 is_supported)
{
//#if (__CUDA_ARCH__ >= 200) //Need to Recheck
if (__HIP_ARCH_HAS_GLOBAL_INT64_ATOMICS__) {
*is_supported = true;
thrust::sort(exec, first, last, comp);
}
//#else //commented while converting the flags
else{
*is_supported = false;
}
//#endif //commented while converting the flags
}
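// For reference only: the commented-out "#if (__CUDA_ARCH__ >= 200)" above suggests the original
// guard was a compile-time check. A sketch of that form (an assumption about the pre-conversion
// code, not taken from this repository) would be:
// #if (__CUDA_ARCH__ >= 200)
// *is_supported = true;
// thrust::sort(exec, first, last, comp);
// #else
// *is_supported = false;
// #endif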
template<typename T>
struct my_less
{
__host__ __device__
bool operator()(const T& lhs, const T& rhs) const
{
return lhs < rhs;
}
};
template<typename T, typename ExecutionPolicy, typename Compare>
void TestComparisonSortDevice(ExecutionPolicy exec, const size_t n, Compare comp)
{
thrust::host_vector<T> h_data = unittest::random_integers<T>(n);
thrust::device_vector<T> d_data = h_data;
thrust::device_vector<bool> is_supported(1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(sort_kernel), dim3(1), dim3(1), 0, 0, exec, d_data.begin(), d_data.end(), comp, is_supported.begin());
if(is_supported[0])
{
thrust::sort(h_data.begin(), h_data.end(), comp);
ASSERT_EQUAL(h_data, d_data);
}
};
template<typename T>
struct TestComparisonSortDeviceSeq
{
void operator()(const size_t n)
{
TestComparisonSortDevice<T>(thrust::seq, n, my_less<T>());
}
};
VariableUnitTest<
TestComparisonSortDeviceSeq,
unittest::type_list<unittest::int8_t,unittest::int32_t>
> TestComparisonSortDeviceSeqInstance;
template<typename T>
struct TestComparisonSortDeviceDevice
{
void operator()(const size_t n)
{
TestComparisonSortDevice<T>(thrust::device, n, my_less<T>());
}
};
VariableUnitTest<
TestComparisonSortDeviceDevice,
unittest::type_list<unittest::int8_t,unittest::int32_t>
> TestComparisonSortDeviceDeviceDeviceInstance;
template<typename T, typename ExecutionPolicy>
void TestSortDevice(ExecutionPolicy exec, const size_t n)
{
TestComparisonSortDevice<T>(exec, n, thrust::less<T>());
};
template<typename T>
struct TestSortDeviceSeq
{
void operator()(const size_t n)
{
TestSortDevice<T>(thrust::seq, n);
}
};
VariableUnitTest<
TestSortDeviceSeq,
unittest::type_list<unittest::int8_t,unittest::int32_t>
> TestSortDeviceSeqInstance;
template<typename T>
struct TestSortDeviceDevice
{
void operator()(const size_t n)
{
TestSortDevice<T>(thrust::device, n);
}
};
VariableUnitTest<
TestSortDeviceDevice,
unittest::type_list<unittest::int8_t,unittest::int32_t>
> TestSortDeviceDeviceInstance;
void TestSortCudaStreams()
{
thrust::device_vector<int> keys(10);
keys[0] = 9;
keys[1] = 3;
keys[2] = 2;
keys[3] = 0;
keys[4] = 4;
keys[5] = 7;
keys[6] = 8;
keys[7] = 1;
keys[8] = 5;
keys[9] = 6;
hipStream_t s;
hipStreamCreate(&s);
thrust::sort(thrust::hip::par.on(s), keys.begin(), keys.end());
hipStreamSynchronize(s);
ASSERT_EQUAL(true, thrust::is_sorted(keys.begin(), keys.end()));
hipStreamDestroy(s);
}
DECLARE_UNITTEST(TestSortCudaStreams);
void TestComparisonSortCudaStreams()
{
thrust::device_vector<int> keys(10);
keys[0] = 9;
keys[1] = 3;
keys[2] = 2;
keys[3] = 0;
keys[4] = 4;
keys[5] = 7;
keys[6] = 8;
keys[7] = 1;
keys[8] = 5;
keys[9] = 6;
hipStream_t s;
hipStreamCreate(&s);
thrust::sort(thrust::hip::par.on(s), keys.begin(), keys.end(), my_less<int>());
hipStreamSynchronize(s);
ASSERT_EQUAL(true, thrust::is_sorted(keys.begin(), keys.end(), my_less<int>()));
hipStreamDestroy(s);
}
DECLARE_UNITTEST(TestComparisonSortCudaStreams);
| 2a7e9d9aaa707eaa31b9a136de41f814e1e2b47e.cu | #include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/sort.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
template<typename ExecutionPolicy, typename Iterator, typename Compare, typename Iterator2>
__global__
void sort_kernel(ExecutionPolicy exec, Iterator first, Iterator last, Compare comp, Iterator2 is_supported)
{
//#if (__CUDA_ARCH__ >= 200) //Need to Recheck
if (__HIP_ARCH_HAS_GLOBAL_INT64_ATOMICS__) {
*is_supported = true;
thrust::sort(exec, first, last, comp);
}
//#else //commented while converting the flags
else{
*is_supported = false;
}
//#endif //commented while converting the flags
}
template<typename T>
struct my_less
{
__host__ __device__
bool operator()(const T& lhs, const T& rhs) const
{
return lhs < rhs;
}
};
template<typename T, typename ExecutionPolicy, typename Compare>
void TestComparisonSortDevice(ExecutionPolicy exec, const size_t n, Compare comp)
{
thrust::host_vector<T> h_data = unittest::random_integers<T>(n);
thrust::device_vector<T> d_data = h_data;
thrust::device_vector<bool> is_supported(1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(sort_kernel), dim3(1), dim3(1), 0, 0, exec, d_data.begin(), d_data.end(), comp, is_supported.begin());
if(is_supported[0])
{
thrust::sort(h_data.begin(), h_data.end(), comp);
ASSERT_EQUAL(h_data, d_data);
}
};
template<typename T>
struct TestComparisonSortDeviceSeq
{
void operator()(const size_t n)
{
TestComparisonSortDevice<T>(thrust::seq, n, my_less<T>());
}
};
VariableUnitTest<
TestComparisonSortDeviceSeq,
unittest::type_list<unittest::int8_t,unittest::int32_t>
> TestComparisonSortDeviceSeqInstance;
template<typename T>
struct TestComparisonSortDeviceDevice
{
void operator()(const size_t n)
{
TestComparisonSortDevice<T>(thrust::device, n, my_less<T>());
}
};
VariableUnitTest<
TestComparisonSortDeviceDevice,
unittest::type_list<unittest::int8_t,unittest::int32_t>
> TestComparisonSortDeviceDeviceDeviceInstance;
template<typename T, typename ExecutionPolicy>
void TestSortDevice(ExecutionPolicy exec, const size_t n)
{
TestComparisonSortDevice<T>(exec, n, thrust::less<T>());
};
template<typename T>
struct TestSortDeviceSeq
{
void operator()(const size_t n)
{
TestSortDevice<T>(thrust::seq, n);
}
};
VariableUnitTest<
TestSortDeviceSeq,
unittest::type_list<unittest::int8_t,unittest::int32_t>
> TestSortDeviceSeqInstance;
template<typename T>
struct TestSortDeviceDevice
{
void operator()(const size_t n)
{
TestSortDevice<T>(thrust::device, n);
}
};
VariableUnitTest<
TestSortDeviceDevice,
unittest::type_list<unittest::int8_t,unittest::int32_t>
> TestSortDeviceDeviceInstance;
void TestSortCudaStreams()
{
thrust::device_vector<int> keys(10);
keys[0] = 9;
keys[1] = 3;
keys[2] = 2;
keys[3] = 0;
keys[4] = 4;
keys[5] = 7;
keys[6] = 8;
keys[7] = 1;
keys[8] = 5;
keys[9] = 6;
hipStream_t s;
hipStreamCreate(&s);
thrust::sort(thrust::cuda::par.on(s), keys.begin(), keys.end());
hipStreamSynchronize(s);
ASSERT_EQUAL(true, thrust::is_sorted(keys.begin(), keys.end()));
hipStreamDestroy(s);
}
DECLARE_UNITTEST(TestSortCudaStreams);
void TestComparisonSortCudaStreams()
{
thrust::device_vector<int> keys(10);
keys[0] = 9;
keys[1] = 3;
keys[2] = 2;
keys[3] = 0;
keys[4] = 4;
keys[5] = 7;
keys[6] = 8;
keys[7] = 1;
keys[8] = 5;
keys[9] = 6;
hipStream_t s;
hipStreamCreate(&s);
thrust::sort(thrust::cuda::par.on(s), keys.begin(), keys.end(), my_less<int>());
hipStreamSynchronize(s);
ASSERT_EQUAL(true, thrust::is_sorted(keys.begin(), keys.end(), my_less<int>()));
hipStreamDestroy(s);
}
DECLARE_UNITTEST(TestComparisonSortCudaStreams);
|
41488da5ae4145f9020b03c9edf0493693575181.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* nvbio
* Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//#define NVBIO_CUDA_DEBUG
//#define NVBIO_CUDA_ASSERTS
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <algorithm>
#include <nvbio/basic/timer.h>
#include <nvbio/basic/console.h>
#include <nvBowtie/bowtie2/cuda/seed_hit_deque_array.h>
namespace nvbio {
namespace bowtie2 {
namespace cuda {
namespace { // anonymous namespace
__global__
void setup_deques_kernel(SeedHitDequeArrayDeviceView seed_hit_deques, const uint32 n_reads, uint32* error)
{
if (threadIdx.x >= n_reads)
return;
typedef SeedHitDequeArrayDeviceView::reference hit_deque_reference;
typedef SeedHitDequeArrayDeviceView::hit_deque_type hit_deque_type;
// fetch the deque bound to this read
hit_deque_reference hit_deque = seed_hit_deques[ threadIdx.x ];
// alloc storage for 2 entries in this read's deque
hit_deque.alloc( 2u );
// push first hit
hit_deque.push( SeedHit( STANDARD, FORWARD, threadIdx.x * 2u + 1u, make_uint2( 0, 100 ) ) );
// push second hit
hit_deque.push( SeedHit( STANDARD, FORWARD, threadIdx.x * 2u + 0u, make_uint2( 0, 10 ) ) );
}
__global__
void check_deques_kernel(SeedHitDequeArrayDeviceView seed_hit_deques, const uint32 n_reads, uint32* error)
{
if (threadIdx.x >= n_reads)
return;
typedef SeedHitDequeArrayDeviceView::reference hit_deque_reference;
typedef SeedHitDequeArrayDeviceView::hit_deque_type hit_deque_type;
// fetch the deque bound to this read
hit_deque_reference hit_deque = seed_hit_deques[ threadIdx.x ];
SeedHit hit;
// pop first hit
hit = hit_deque.top(); hit_deque.pop();
if (hit.get_posinread() != threadIdx.x * 2u + 0u)
*error = 1;
// pop second hit
hit = hit_deque.top(); hit_deque.pop();
if (hit.get_posinread() != threadIdx.x * 2u + 1u)
*error = 2;
}
} // anonymous namespace
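// What the pair of kernels above checks: setup_deques_kernel pushes two SeedHits per read
// (positions 2*i+1 then 2*i+0), and check_deques_kernel expects them to pop back in increasing
// position order, i.e. the per-read deque orders its hits rather than returning them FIFO.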
void test_seed_hit_deques()
{
log_info(stderr, "test seed_hit_deques... started\n");
SeedHitDequeArray seed_hit_deques;
const uint32 n_hits = 100;
const uint32 n_reads = 50;
const uint64 bytes = seed_hit_deques.resize( n_reads, n_hits );
log_info(stderr, " allocated %llu bytes\n", bytes);
thrust::device_vector<uint32> error(1,0);
hipLaunchKernelGGL(( setup_deques_kernel), dim3(1),dim3(128), 0, 0, seed_hit_deques.device_view(), n_reads, device_view( error ) );
hipLaunchKernelGGL(( check_deques_kernel), dim3(1),dim3(128), 0, 0, seed_hit_deques.device_view(), n_reads, device_view( error ) );
hipDeviceSynchronize();
const uint32 error_code = error[0];
if (error_code)
log_error( stderr, "test_read_hits_index failed! (error code: %u)\n", error_code );
log_info(stderr, "test seed_hit_deques... done\n");
}
} // namespace cuda
} // namespace bowtie2
} // namespace nvbio
| 41488da5ae4145f9020b03c9edf0493693575181.cu | /*
* nvbio
* Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//#define NVBIO_CUDA_DEBUG
//#define NVBIO_CUDA_ASSERTS
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <algorithm>
#include <nvbio/basic/timer.h>
#include <nvbio/basic/console.h>
#include <nvBowtie/bowtie2/cuda/seed_hit_deque_array.h>
namespace nvbio {
namespace bowtie2 {
namespace cuda {
namespace { // anonymous namespace
__global__
void setup_deques_kernel(SeedHitDequeArrayDeviceView seed_hit_deques, const uint32 n_reads, uint32* error)
{
if (threadIdx.x >= n_reads)
return;
typedef SeedHitDequeArrayDeviceView::reference hit_deque_reference;
typedef SeedHitDequeArrayDeviceView::hit_deque_type hit_deque_type;
// fetch the deque bound to this read
hit_deque_reference hit_deque = seed_hit_deques[ threadIdx.x ];
// alloc storage for 2 entries in this read's deque
hit_deque.alloc( 2u );
// push first hit
hit_deque.push( SeedHit( STANDARD, FORWARD, threadIdx.x * 2u + 1u, make_uint2( 0, 100 ) ) );
// push second hit
hit_deque.push( SeedHit( STANDARD, FORWARD, threadIdx.x * 2u + 0u, make_uint2( 0, 10 ) ) );
}
__global__
void check_deques_kernel(SeedHitDequeArrayDeviceView seed_hit_deques, const uint32 n_reads, uint32* error)
{
if (threadIdx.x >= n_reads)
return;
typedef SeedHitDequeArrayDeviceView::reference hit_deque_reference;
typedef SeedHitDequeArrayDeviceView::hit_deque_type hit_deque_type;
// fetch the deque bound to this read
hit_deque_reference hit_deque = seed_hit_deques[ threadIdx.x ];
SeedHit hit;
// pop first hit
hit = hit_deque.top(); hit_deque.pop();
if (hit.get_posinread() != threadIdx.x * 2u + 0u)
*error = 1;
// pop second hit
hit = hit_deque.top(); hit_deque.pop();
if (hit.get_posinread() != threadIdx.x * 2u + 1u)
*error = 2;
}
} // anonymous namespace
void test_seed_hit_deques()
{
log_info(stderr, "test seed_hit_deques... started\n");
SeedHitDequeArray seed_hit_deques;
const uint32 n_hits = 100;
const uint32 n_reads = 50;
const uint64 bytes = seed_hit_deques.resize( n_reads, n_hits );
log_info(stderr, " allocated %llu bytes\n", bytes);
thrust::device_vector<uint32> error(1,0);
setup_deques_kernel<<<1,128>>>( seed_hit_deques.device_view(), n_reads, device_view( error ) );
check_deques_kernel<<<1,128>>>( seed_hit_deques.device_view(), n_reads, device_view( error ) );
cudaThreadSynchronize();
const uint32 error_code = error[0];
if (error_code)
log_error( stderr, "test_read_hits_index failed! (error code: %u)\n", error_code );
log_info(stderr, "test seed_hit_deques... done\n");
}
} // namespace cuda
} // namespace bowtie2
} // namespace nvbio
|
65822776725d6ebb22e56a1b7e758c11f2874468.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <cuPrintf.cu>
#include <float.h>
#include "kernels.hip"
#include "naive.h"
#include <string.h>
//#define eps_f FLT_EPSILON
int main(int argc, char** argv){
size_t buff=9999999;
cudaPrintfInit(buff);
if(argc < 2){
puts("neoNbody: Usage: neoNbody numPoints");
exit(0);
}
int L_SIZE = atoi(argv[1]);
int N_POINTS = L_SIZE * L_SIZE;
printf("%d %d\n",L_SIZE,N_POINTS);
hipError_t status = hipSuccess;
// Alloc CPU memory
float3* r_CPU;
r_CPU = (float3*) malloc(sizeof(float3) * N_POINTS * N_POINTS);
float* u_CPU;
u_CPU = (float*) malloc(sizeof(float) * 3 * 3 * N_POINTS * N_POINTS);
fillR_CPU(r_CPU,L_SIZE);
// Alloc GPU Memory
float3 *r_GPU;
status=hipMalloc((void**) &r_GPU, sizeof(float3) * N_POINTS);
if (status != hipSuccess)
fprintf(stderr,"hipMalloc: allocating r: %s\n", status);
status=hipMemcpy(r_GPU,r_CPU,sizeof(float3) * N_POINTS,hipMemcpyHostToDevice);
if (status != hipSuccess) fprintf(stderr, "hipMemcpy: r: copy from Host to Device\n",status);
float *u_GPU;
status=hipMalloc((void**) &u_GPU, sizeof(float) * N_POINTS * N_POINTS * 3 * 3);
if (status != hipSuccess)
fprintf(stderr,"hipMalloc: allocating u: %s\n", status);
// Calculate U
dim3 blcsGrid(L_SIZE,1);
dim3 thrdsBlck(L_SIZE,1);
hipLaunchKernelGGL(( calcU_GPU), dim3(blcsGrid),dim3(thrdsBlck), 0, 0, r_GPU,u_GPU);
status=hipMemcpy(u_CPU,u_GPU,sizeof(float) * N_POINTS * N_POINTS * 3 * 3, hipMemcpyDeviceToHost);
if (status != hipSuccess)
fprintf(stderr,"hipMemcpy: copy from Device to Host u: %s\n", status);
/*
for(int i=0;i<N_POINTS*9;i++){
printf("%d\n",i/9);
printf("%f %f %f\n",u_CPU[i++],u_CPU[i++],u_CPU[i++]);
printf("%f %f %f\n",u_CPU[i++],u_CPU[i++],u_CPU[i++]);
printf("%f %f %f\n",u_CPU[i++],u_CPU[i++],u_CPU[i]);
puts("");
}
*/
cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
} | 65822776725d6ebb22e56a1b7e758c11f2874468.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuPrintf.cu>
#include <float.h>
#include "kernels.cu"
#include "naive.h"
#include <string.h>
//#define eps_f FLT_EPSILON
int main(int argc, char** argv){
size_t buff=9999999;
cudaPrintfInit(buff);
if(argc < 2){
puts("neoNbody: Usage: neoNbody numPoints");
exit(0);
}
int L_SIZE = atoi(argv[1]);
int N_POINTS = L_SIZE * L_SIZE;
printf("%d %d\n",L_SIZE,N_POINTS);
cudaError_t status = cudaSuccess;
// Alloc CPU memory
float3* r_CPU;
r_CPU = (float3*) malloc(sizeof(float3) * N_POINTS * N_POINTS);
float* u_CPU;
u_CPU = (float*) malloc(sizeof(float) * 3 * 3 * N_POINTS * N_POINTS);
fillR_CPU(r_CPU,L_SIZE);
// Alloc GPU Memory
float3 *r_GPU;
status=cudaMalloc((void**) &r_GPU, sizeof(float3) * N_POINTS);
if (status != cudaSuccess)
fprintf(stderr,"cudaMalloc: allocating r: %s\n", status);
status=cudaMemcpy(r_GPU,r_CPU,sizeof(float3) * N_POINTS,cudaMemcpyHostToDevice);
if (status != cudaSuccess) fprintf(stderr, "cudaMemcpy: r: copy from Host to Device\n",status);
float *u_GPU;
status=cudaMalloc((void**) &u_GPU, sizeof(float) * N_POINTS * N_POINTS * 3 * 3);
if (status != cudaSuccess)
fprintf(stderr,"cudaMalloc: allocating u: %s\n", status);
// Calculate U
dim3 blcsGrid(L_SIZE,1);
dim3 thrdsBlck(L_SIZE,1);
calcU_GPU<<<blcsGrid,thrdsBlck>>>(r_GPU,u_GPU);
status=cudaMemcpy(u_CPU,u_GPU,sizeof(float) * N_POINTS * N_POINTS * 3 * 3, cudaMemcpyDeviceToHost);
if (status != cudaSuccess)
fprintf(stderr,"cudaMemcpy: copy from Device to Host u: %s\n", status);
/*
for(int i=0;i<N_POINTS*9;i++){
printf("%d\n",i/9);
printf("%f %f %f\n",u_CPU[i++],u_CPU[i++],u_CPU[i++]);
printf("%f %f %f\n",u_CPU[i++],u_CPU[i++],u_CPU[i++]);
printf("%f %f %f\n",u_CPU[i++],u_CPU[i++],u_CPU[i]);
puts("");
}
*/
cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
} |
e47c0e1c615685e17a93c08dd125340d74d6951f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "groupby.hpp"
#include <utilities/cuda_utils.hpp>
#include <quantiles/quantiles.hpp>
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <cudf/cudf.h>
#include <cudf/types.hpp>
#include <cudf/legacy/column.hpp>
#include <rmm/rmm.h>
#include <thrust/for_each.h>
namespace cudf {
namespace {
struct quantiles_functor {
template <typename T>
std::enable_if_t<std::is_arithmetic<T>::value, void >
operator()(gdf_column const& values_col,
rmm::device_vector<gdf_size_type> const& group_offsets,
rmm::device_vector<gdf_size_type> const& group_sizes,
gdf_column* result_col, rmm::device_vector<double> const& quantile,
cudf::interpolation interpolation, hipStream_t stream = 0)
{
// prepare args to be used by lambda below
auto result = static_cast<double*>(result_col->data);
auto values = static_cast<T*>(values_col.data);
auto group_id = group_offsets.data().get();
auto group_size = group_sizes.data().get();
auto d_quantiles = quantile.data().get();
auto num_quantiles = quantile.size();
// For each group, calculate quantile
thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator(0),
group_offsets.size(),
[=] __device__ (gdf_size_type i) {
gdf_size_type segment_size = group_size[i];
auto value = values + group_id[i];
thrust::transform(thrust::seq, d_quantiles, d_quantiles + num_quantiles,
result + i * num_quantiles,
[=](auto q) {
return detail::select_quantile(value,
segment_size,
q,
interpolation);
});
}
);
}
template <typename T, typename... Args>
std::enable_if_t<!std::is_arithmetic<T>::value, void >
operator()(Args&&... args) {
CUDF_FAIL("Only arithmetic types are supported in quantiles");
}
};
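// Note on the two operator() overloads above: enable_if selects the first for arithmetic element
// types and the second for everything else, so type_dispatcher reaches a clean CUDF_FAIL at run
// time for unsupported types instead of failing to compile inside the device lambda.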
} // namespace anonymous
// TODO: add optional check for is_sorted. Use context.flag_sorted
std::pair<cudf::table, cudf::table>
group_quantiles(cudf::table const& keys,
cudf::table const& values,
std::vector<double> const& quantiles,
cudf::interpolation interpolation,
bool include_nulls)
{
detail::groupby gb_obj(keys, include_nulls);
auto group_offsets = gb_obj.group_offsets();
rmm::device_vector<double> dv_quantiles(quantiles);
cudf::table result_table(gb_obj.num_groups() * quantiles.size(),
std::vector<gdf_dtype>(values.num_columns(), GDF_FLOAT64),
std::vector<gdf_dtype_extra_info>(values.num_columns()));
for (gdf_size_type i = 0; i < values.num_columns(); i++)
{
gdf_column sorted_values;
rmm::device_vector<gdf_size_type> group_sizes;
std::tie(sorted_values, group_sizes) =
gb_obj.sort_values(*(values.get_column(i)));
gdf_column* result_col = result_table.get_column(i);
type_dispatcher(sorted_values.dtype, quantiles_functor{},
sorted_values, group_offsets, group_sizes, result_col,
dv_quantiles, interpolation);
gdf_column_free(&sorted_values);
}
return std::make_pair(gb_obj.unique_keys(), result_table);
}
} // namespace cudf
| e47c0e1c615685e17a93c08dd125340d74d6951f.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "groupby.hpp"
#include <utilities/cuda_utils.hpp>
#include <quantiles/quantiles.hpp>
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <cudf/cudf.h>
#include <cudf/types.hpp>
#include <cudf/legacy/column.hpp>
#include <rmm/rmm.h>
#include <thrust/for_each.h>
namespace cudf {
namespace {
struct quantiles_functor {
template <typename T>
std::enable_if_t<std::is_arithmetic<T>::value, void >
operator()(gdf_column const& values_col,
rmm::device_vector<gdf_size_type> const& group_offsets,
rmm::device_vector<gdf_size_type> const& group_sizes,
gdf_column* result_col, rmm::device_vector<double> const& quantile,
cudf::interpolation interpolation, cudaStream_t stream = 0)
{
// prepare args to be used by lambda below
auto result = static_cast<double*>(result_col->data);
auto values = static_cast<T*>(values_col.data);
auto group_id = group_offsets.data().get();
auto group_size = group_sizes.data().get();
auto d_quantiles = quantile.data().get();
auto num_quantiles = quantile.size();
// For each group, calculate quantile
thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator(0),
group_offsets.size(),
[=] __device__ (gdf_size_type i) {
gdf_size_type segment_size = group_size[i];
auto value = values + group_id[i];
thrust::transform(thrust::seq, d_quantiles, d_quantiles + num_quantiles,
result + i * num_quantiles,
[=](auto q) {
return detail::select_quantile(value,
segment_size,
q,
interpolation);
});
}
);
}
template <typename T, typename... Args>
std::enable_if_t<!std::is_arithmetic<T>::value, void >
operator()(Args&&... args) {
CUDF_FAIL("Only arithmetic types are supported in quantiles");
}
};
} // namespace anonymous
// TODO: add optional check for is_sorted. Use context.flag_sorted
std::pair<cudf::table, cudf::table>
group_quantiles(cudf::table const& keys,
cudf::table const& values,
std::vector<double> const& quantiles,
cudf::interpolation interpolation,
bool include_nulls)
{
detail::groupby gb_obj(keys, include_nulls);
auto group_offsets = gb_obj.group_offsets();
rmm::device_vector<double> dv_quantiles(quantiles);
cudf::table result_table(gb_obj.num_groups() * quantiles.size(),
std::vector<gdf_dtype>(values.num_columns(), GDF_FLOAT64),
std::vector<gdf_dtype_extra_info>(values.num_columns()));
for (gdf_size_type i = 0; i < values.num_columns(); i++)
{
gdf_column sorted_values;
rmm::device_vector<gdf_size_type> group_sizes;
std::tie(sorted_values, group_sizes) =
gb_obj.sort_values(*(values.get_column(i)));
gdf_column* result_col = result_table.get_column(i);
type_dispatcher(sorted_values.dtype, quantiles_functor{},
sorted_values, group_offsets, group_sizes, result_col,
dv_quantiles, interpolation);
gdf_column_free(&sorted_values);
}
return std::make_pair(gb_obj.unique_keys(), result_table);
}
} // namespace cudf
|
f08ff6bff5586718fe931fef85962bb7e1f7c410.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2020 by Contributors
* @file array/cuda/csr2coo.cc
* @brief CSR2COO
*/
#include <dgl/array.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include "../../runtime/cuda/cuda_common.h"
#include "./dgl_cub.cuh"
#include "./utils.h"
namespace dgl {
using runtime::NDArray;
namespace aten {
namespace impl {
template <DGLDeviceType XPU, typename IdType>
COOMatrix CSRToCOO(CSRMatrix csr) {
LOG(FATAL) << "Unreachable codes";
return {};
}
template <>
COOMatrix CSRToCOO<kDGLCUDA, int32_t>(CSRMatrix csr) {
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
hipStream_t stream = runtime::getCurrentHIPStreamMasqueradingAsCUDA();
// allocate cusparse handle if needed
if (!thr_entry->cusparse_handle) {
CUSPARSE_CALL(hipsparseCreate(&(thr_entry->cusparse_handle)));
}
CUSPARSE_CALL(hipsparseSetStream(thr_entry->cusparse_handle, stream));
NDArray indptr = csr.indptr, indices = csr.indices, data = csr.data;
const int32_t* indptr_ptr = static_cast<int32_t*>(indptr->data);
NDArray row =
aten::NewIdArray(indices->shape[0], indptr->ctx, indptr->dtype.bits);
int32_t* row_ptr = static_cast<int32_t*>(row->data);
CUSPARSE_CALL(hipsparseXcsr2coo(
thr_entry->cusparse_handle, indptr_ptr, indices->shape[0], csr.num_rows,
row_ptr, HIPSPARSE_INDEX_BASE_ZERO));
return COOMatrix(
csr.num_rows, csr.num_cols, row, indices, data, true, csr.sorted);
}
struct RepeatIndex {
template <typename IdType>
__host__ __device__ auto operator()(IdType i) {
return thrust::make_constant_iterator(i);
}
};
template <typename IdType>
struct OutputBufferIndexer {
const IdType* indptr;
IdType* buffer;
__host__ __device__ auto operator()(IdType i) { return buffer + indptr[i]; }
};
template <typename IdType>
struct AdjacentDifference {
const IdType* indptr;
__host__ __device__ auto operator()(IdType i) {
return indptr[i + 1] - indptr[i];
}
};
template <>
COOMatrix CSRToCOO<kDGLCUDA, int64_t>(CSRMatrix csr) {
const auto& ctx = csr.indptr->ctx;
hipStream_t stream = runtime::getCurrentHIPStreamMasqueradingAsCUDA();
const int64_t nnz = csr.indices->shape[0];
const auto nbits = csr.indptr->dtype.bits;
IdArray ret_row = NewIdArray(nnz, ctx, nbits);
runtime::CUDAWorkspaceAllocator allocator(csr.indptr->ctx);
thrust::counting_iterator<int64_t> iota(0);
auto input_buffer = thrust::make_transform_iterator(iota, RepeatIndex{});
auto output_buffer = thrust::make_transform_iterator(
iota, OutputBufferIndexer<int64_t>{
csr.indptr.Ptr<int64_t>(), ret_row.Ptr<int64_t>()});
auto buffer_sizes = thrust::make_transform_iterator(
iota, AdjacentDifference<int64_t>{csr.indptr.Ptr<int64_t>()});
constexpr int64_t max_copy_at_once = std::numeric_limits<int32_t>::max();
for (int64_t i = 0; i < csr.num_rows; i += max_copy_at_once) {
std::size_t temp_storage_bytes = 0;
CUDA_CALL(cub::DeviceCopy::Batched(
nullptr, temp_storage_bytes, input_buffer + i, output_buffer + i,
buffer_sizes + i, ::min(csr.num_rows - i, max_copy_at_once),
stream));
auto temp = allocator.alloc_unique<char>(temp_storage_bytes);
CUDA_CALL(cub::DeviceCopy::Batched(
temp.get(), temp_storage_bytes, input_buffer + i, output_buffer + i,
buffer_sizes + i, ::min(csr.num_rows - i, max_copy_at_once),
stream));
}
return COOMatrix(
csr.num_rows, csr.num_cols, ret_row, csr.indices, csr.data, true,
csr.sorted);
}
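// Illustrative only (names below are local to this comment, not part of the DGL API): the batched
// DeviceCopy above expands the CSR indptr into per-nonzero row ids; the sequential equivalent is
// for (int64_t r = 0; r < num_rows; ++r)
// for (int64_t e = indptr[r]; e < indptr[r + 1]; ++e)
// row[e] = r; // repeat each row id once per nonzero in that row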
template COOMatrix CSRToCOO<kDGLCUDA, int32_t>(CSRMatrix csr);
template COOMatrix CSRToCOO<kDGLCUDA, int64_t>(CSRMatrix csr);
template <DGLDeviceType XPU, typename IdType>
COOMatrix CSRToCOODataAsOrder(CSRMatrix csr) {
LOG(FATAL) << "Unreachable codes";
return {};
}
template <>
COOMatrix CSRToCOODataAsOrder<kDGLCUDA, int32_t>(CSRMatrix csr) {
COOMatrix coo = CSRToCOO<kDGLCUDA, int32_t>(csr);
if (aten::IsNullArray(coo.data)) return coo;
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
auto device = runtime::DeviceAPI::Get(coo.row->ctx);
hipStream_t stream = runtime::getCurrentHIPStreamMasqueradingAsCUDA();
// allocate cusparse handle if needed
if (!thr_entry->cusparse_handle) {
CUSPARSE_CALL(hipsparseCreate(&(thr_entry->cusparse_handle)));
}
CUSPARSE_CALL(hipsparseSetStream(thr_entry->cusparse_handle, stream));
NDArray row = coo.row, col = coo.col, data = coo.data;
int32_t* row_ptr = static_cast<int32_t*>(row->data);
int32_t* col_ptr = static_cast<int32_t*>(col->data);
int32_t* data_ptr = static_cast<int32_t*>(data->data);
size_t workspace_size = 0;
CUSPARSE_CALL(hipsparseXcoosort_bufferSizeExt(
thr_entry->cusparse_handle, coo.num_rows, coo.num_cols, row->shape[0],
data_ptr, row_ptr, &workspace_size));
void* workspace = device->AllocWorkspace(row->ctx, workspace_size);
CUSPARSE_CALL(hipsparseXcoosortByRow(
thr_entry->cusparse_handle, coo.num_rows, coo.num_cols, row->shape[0],
data_ptr, row_ptr, col_ptr, workspace));
device->FreeWorkspace(row->ctx, workspace);
// The row and column field have already been reordered according
// to data, thus the data field will be deprecated.
coo.data = aten::NullArray();
coo.row_sorted = false;
coo.col_sorted = false;
return coo;
}
template <>
COOMatrix CSRToCOODataAsOrder<kDGLCUDA, int64_t>(CSRMatrix csr) {
COOMatrix coo = CSRToCOO<kDGLCUDA, int64_t>(csr);
if (aten::IsNullArray(coo.data)) return coo;
const auto& sorted = Sort(coo.data);
coo.row = IndexSelect(coo.row, sorted.second);
coo.col = IndexSelect(coo.col, sorted.second);
// The row and column field have already been reordered according
// to data, thus the data field will be deprecated.
coo.data = aten::NullArray();
coo.row_sorted = false;
coo.col_sorted = false;
return coo;
}
template COOMatrix CSRToCOODataAsOrder<kDGLCUDA, int32_t>(CSRMatrix csr);
template COOMatrix CSRToCOODataAsOrder<kDGLCUDA, int64_t>(CSRMatrix csr);
} // namespace impl
} // namespace aten
} // namespace dgl
| f08ff6bff5586718fe931fef85962bb7e1f7c410.cu | /**
* Copyright (c) 2020 by Contributors
* @file array/cuda/csr2coo.cc
* @brief CSR2COO
*/
#include <dgl/array.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include "../../runtime/cuda/cuda_common.h"
#include "./dgl_cub.cuh"
#include "./utils.h"
namespace dgl {
using runtime::NDArray;
namespace aten {
namespace impl {
template <DGLDeviceType XPU, typename IdType>
COOMatrix CSRToCOO(CSRMatrix csr) {
LOG(FATAL) << "Unreachable codes";
return {};
}
template <>
COOMatrix CSRToCOO<kDGLCUDA, int32_t>(CSRMatrix csr) {
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
cudaStream_t stream = runtime::getCurrentCUDAStream();
// allocate cusparse handle if needed
if (!thr_entry->cusparse_handle) {
CUSPARSE_CALL(cusparseCreate(&(thr_entry->cusparse_handle)));
}
CUSPARSE_CALL(cusparseSetStream(thr_entry->cusparse_handle, stream));
NDArray indptr = csr.indptr, indices = csr.indices, data = csr.data;
const int32_t* indptr_ptr = static_cast<int32_t*>(indptr->data);
NDArray row =
aten::NewIdArray(indices->shape[0], indptr->ctx, indptr->dtype.bits);
int32_t* row_ptr = static_cast<int32_t*>(row->data);
CUSPARSE_CALL(cusparseXcsr2coo(
thr_entry->cusparse_handle, indptr_ptr, indices->shape[0], csr.num_rows,
row_ptr, CUSPARSE_INDEX_BASE_ZERO));
return COOMatrix(
csr.num_rows, csr.num_cols, row, indices, data, true, csr.sorted);
}
struct RepeatIndex {
template <typename IdType>
__host__ __device__ auto operator()(IdType i) {
return thrust::make_constant_iterator(i);
}
};
template <typename IdType>
struct OutputBufferIndexer {
const IdType* indptr;
IdType* buffer;
__host__ __device__ auto operator()(IdType i) { return buffer + indptr[i]; }
};
template <typename IdType>
struct AdjacentDifference {
const IdType* indptr;
__host__ __device__ auto operator()(IdType i) {
return indptr[i + 1] - indptr[i];
}
};
template <>
COOMatrix CSRToCOO<kDGLCUDA, int64_t>(CSRMatrix csr) {
const auto& ctx = csr.indptr->ctx;
cudaStream_t stream = runtime::getCurrentCUDAStream();
const int64_t nnz = csr.indices->shape[0];
const auto nbits = csr.indptr->dtype.bits;
IdArray ret_row = NewIdArray(nnz, ctx, nbits);
runtime::CUDAWorkspaceAllocator allocator(csr.indptr->ctx);
thrust::counting_iterator<int64_t> iota(0);
auto input_buffer = thrust::make_transform_iterator(iota, RepeatIndex{});
auto output_buffer = thrust::make_transform_iterator(
iota, OutputBufferIndexer<int64_t>{
csr.indptr.Ptr<int64_t>(), ret_row.Ptr<int64_t>()});
auto buffer_sizes = thrust::make_transform_iterator(
iota, AdjacentDifference<int64_t>{csr.indptr.Ptr<int64_t>()});
constexpr int64_t max_copy_at_once = std::numeric_limits<int32_t>::max();
for (int64_t i = 0; i < csr.num_rows; i += max_copy_at_once) {
std::size_t temp_storage_bytes = 0;
CUDA_CALL(cub::DeviceCopy::Batched(
nullptr, temp_storage_bytes, input_buffer + i, output_buffer + i,
buffer_sizes + i, std::min(csr.num_rows - i, max_copy_at_once),
stream));
auto temp = allocator.alloc_unique<char>(temp_storage_bytes);
CUDA_CALL(cub::DeviceCopy::Batched(
temp.get(), temp_storage_bytes, input_buffer + i, output_buffer + i,
buffer_sizes + i, std::min(csr.num_rows - i, max_copy_at_once),
stream));
}
return COOMatrix(
csr.num_rows, csr.num_cols, ret_row, csr.indices, csr.data, true,
csr.sorted);
}
template COOMatrix CSRToCOO<kDGLCUDA, int32_t>(CSRMatrix csr);
template COOMatrix CSRToCOO<kDGLCUDA, int64_t>(CSRMatrix csr);
template <DGLDeviceType XPU, typename IdType>
COOMatrix CSRToCOODataAsOrder(CSRMatrix csr) {
LOG(FATAL) << "Unreachable codes";
return {};
}
template <>
COOMatrix CSRToCOODataAsOrder<kDGLCUDA, int32_t>(CSRMatrix csr) {
COOMatrix coo = CSRToCOO<kDGLCUDA, int32_t>(csr);
if (aten::IsNullArray(coo.data)) return coo;
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
auto device = runtime::DeviceAPI::Get(coo.row->ctx);
cudaStream_t stream = runtime::getCurrentCUDAStream();
// allocate cusparse handle if needed
if (!thr_entry->cusparse_handle) {
CUSPARSE_CALL(cusparseCreate(&(thr_entry->cusparse_handle)));
}
CUSPARSE_CALL(cusparseSetStream(thr_entry->cusparse_handle, stream));
NDArray row = coo.row, col = coo.col, data = coo.data;
int32_t* row_ptr = static_cast<int32_t*>(row->data);
int32_t* col_ptr = static_cast<int32_t*>(col->data);
int32_t* data_ptr = static_cast<int32_t*>(data->data);
size_t workspace_size = 0;
CUSPARSE_CALL(cusparseXcoosort_bufferSizeExt(
thr_entry->cusparse_handle, coo.num_rows, coo.num_cols, row->shape[0],
data_ptr, row_ptr, &workspace_size));
void* workspace = device->AllocWorkspace(row->ctx, workspace_size);
CUSPARSE_CALL(cusparseXcoosortByRow(
thr_entry->cusparse_handle, coo.num_rows, coo.num_cols, row->shape[0],
data_ptr, row_ptr, col_ptr, workspace));
device->FreeWorkspace(row->ctx, workspace);
// The row and column field have already been reordered according
// to data, thus the data field will be deprecated.
coo.data = aten::NullArray();
coo.row_sorted = false;
coo.col_sorted = false;
return coo;
}
template <>
COOMatrix CSRToCOODataAsOrder<kDGLCUDA, int64_t>(CSRMatrix csr) {
COOMatrix coo = CSRToCOO<kDGLCUDA, int64_t>(csr);
if (aten::IsNullArray(coo.data)) return coo;
const auto& sorted = Sort(coo.data);
coo.row = IndexSelect(coo.row, sorted.second);
coo.col = IndexSelect(coo.col, sorted.second);
// The row and column field have already been reordered according
// to data, thus the data field will be deprecated.
coo.data = aten::NullArray();
coo.row_sorted = false;
coo.col_sorted = false;
return coo;
}
template COOMatrix CSRToCOODataAsOrder<kDGLCUDA, int32_t>(CSRMatrix csr);
template COOMatrix CSRToCOODataAsOrder<kDGLCUDA, int64_t>(CSRMatrix csr);
} // namespace impl
} // namespace aten
} // namespace dgl
|
444d74ffdc70e4bfb94cd2b997795cfee7effa9e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <time.h>
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 256
#endif
#ifndef N_ELEMS
#define N_ELEMS 753411
#endif
// Src: Lab1-CudaIntro. Get time difference
int timeval_subtract(
struct timeval *result,
struct timeval *t2,
struct timeval *t1)
{
unsigned int resolution = 1000000;
long int diff = (t2->tv_usec + resolution * t2->tv_sec) -
(t1->tv_usec + resolution * t1->tv_sec);
result->tv_sec = diff / resolution; result->tv_usec = diff % resolution;
return (diff<0);
}
__global__ void kernel(float *d_in, float *d_out, int N){
const unsigned int lid = threadIdx.x; // Local id inside a block
const unsigned int gid = blockIdx.x*blockDim.x + lid; // global id
if (gid < N){
d_out[gid] = powf(d_in[gid]/(d_in[gid]-2.3), 3);
}
}
void gpu_run(float* inp, float* out, int N)
{
// Most of this code is stolen from the lab1 slides
// Time tracking vars
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
// Block distr vars
unsigned int block_size = BLOCK_SIZE;
unsigned int num_blocks = ((N + (block_size - 1)) / block_size);
// Memory assignment
unsigned int mem_size = N*sizeof(float);
float* d_in;
float* d_out;
hipMalloc((void**)&d_in, mem_size);
hipMalloc((void**)&d_out, mem_size);
// Copy host mem to device
hipMemcpy(d_in, inp, mem_size, hipMemcpyHostToDevice);
// Exec kernel(with timetrack)
gettimeofday(&t_start, NULL);
hipLaunchKernelGGL(( kernel), dim3(num_blocks), dim3(block_size), 0, 0, d_in, d_out, N);
gettimeofday(&t_end, NULL);
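// Note: the launch above is asynchronous, so without a hipDeviceSynchronize() between the launch
// and this second gettimeofday the elapsed time mostly reflects launch overhead, not kernel runtime.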
// Copy result from device to host
hipMemcpy(out, d_out, mem_size, hipMemcpyDeviceToHost);
hipFree(d_in); hipFree(d_out);
// Calculate and print time
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
printf("GPU Run took %d microseconds (%.2fms)\n", elapsed, elapsed / 1000.0);
}
void seq_run(float* inp, float* out, int N){
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
for(unsigned int i = 0; i < N; ++i){
out[i] = powf(inp[i]/(inp[i]-2.3), 3);
}
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
printf("CPU Run took %d microseconds (%.2fms)\n", elapsed, elapsed / 1000.0);
}
int main( int argc, char** argv){
unsigned int N = N_ELEMS;
unsigned int mem_size = N*sizeof(float);
// Init memory arrays
float* in = (float*) malloc(mem_size);
float* gpu_out = (float*) malloc(mem_size);
float* seq_out = (float*) malloc(mem_size);
// And init the input array
for (unsigned int i=0; i<N; ++i) in[i] = (float)i;
// Run the code on the CPU
seq_run(in, seq_out, N);
// Run the code on the GPU
gpu_run(in, gpu_out, N);
// Now validate results:
int passed = 0;
int invalid = 0;
for (int i = 0; i < N; ++i) {
if (fabs(seq_out[i] - gpu_out[i]) < 0.0001)
passed++;
else invalid++;
}
printf("Passed: %06d, Invalid: %06d\n", passed, invalid);
//DEBUG: Print the first 10 and last 10 values to 10p of precision
// for(int i = 0; i < 10; i++) printf("%6d:\t%.10f\t%.10f\n", i, seq_out[i], gpu_out[i]);
// for(int i = 0; i < 10; i++) printf("%6d:\t%.10f\t%.10f\n", N-i, seq_out[N-i], gpu_out[N-i]);
// Free outpus databases
free(in); free(gpu_out); free(seq_out);
return 0;
}
| 444d74ffdc70e4bfb94cd2b997795cfee7effa9e.cu | #include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include <sys/time.h>
#include <time.h>
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 256
#endif
#ifndef N_ELEMS
#define N_ELEMS 753411
#endif
// Src: Lab1-CudaIntro. Get time difference
int timeval_subtract(
struct timeval *result,
struct timeval *t2,
struct timeval *t1)
{
unsigned int resolution = 1000000;
long int diff = (t2->tv_usec + resolution * t2->tv_sec) -
(t1->tv_usec + resolution * t1->tv_sec);
result->tv_sec = diff / resolution; result->tv_usec = diff % resolution;
return (diff<0);
}
__global__ void kernel(float *d_in, float *d_out, int N){
const unsigned int lid = threadIdx.x; // Local id inside a block
const unsigned int gid = blockIdx.x*blockDim.x + lid; // global id
if (gid < N){
d_out[gid] = powf(d_in[gid]/(d_in[gid]-2.3), 3);
}
}
void gpu_run(float* inp, float* out, int N)
{
// Most of this code is stolen from the lab1 slides
// Time tracking vars
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
// Block distr vars
unsigned int block_size = BLOCK_SIZE;
unsigned int num_blocks = ((N + (block_size - 1)) / block_size);
// Memory assignment
unsigned int mem_size = N*sizeof(float);
float* d_in;
float* d_out;
cudaMalloc((void**)&d_in, mem_size);
cudaMalloc((void**)&d_out, mem_size);
// Copy host mem to device
cudaMemcpy(d_in, inp, mem_size, cudaMemcpyHostToDevice);
// Exec kernel(with timetrack)
gettimeofday(&t_start, NULL);
kernel<<<num_blocks, block_size>>>(d_in, d_out, N);
gettimeofday(&t_end, NULL);
// Copy result from device to host
cudaMemcpy(out, d_out, mem_size, cudaMemcpyDeviceToHost);
cudaFree(d_in); cudaFree(d_out);
// Calculate and print time
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
printf("GPU Run took %d microseconds (%.2fms)\n", elapsed, elapsed / 1000.0);
}
void seq_run(float* inp, float* out, int N){
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
for(unsigned int i = 0; i < N; ++i){
out[i] = powf(inp[i]/(inp[i]-2.3), 3);
}
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
printf("CPU Run took %d microseconds (%.2fms)\n", elapsed, elapsed / 1000.0);
}
int main( int argc, char** argv){
unsigned int N = N_ELEMS;
unsigned int mem_size = N*sizeof(float);
// Init memory arrays
float* in = (float*) malloc(mem_size);
float* gpu_out = (float*) malloc(mem_size);
float* seq_out = (float*) malloc(mem_size);
// And init the input array
for (unsigned int i=0; i<N; ++i) in[i] = (float)i;
// Run the code on the CPU
seq_run(in, seq_out, N);
// Run the code on the GPU
gpu_run(in, gpu_out, N);
// Now validate results:
int passed = 0;
int invalid = 0;
for (int i = 0; i < N; ++i) {
if (fabs(seq_out[i] - gpu_out[i]) < 0.0001)
passed++;
else invalid++;
}
printf("Passed: %06d, Invalid: %06d\n", passed, invalid);
//DEBUG: Print the first 10 and last 10 values to 10p of precision
// for(int i = 0; i < 10; i++) printf("%6d:\t%.10f\t%.10f\n", i, seq_out[i], gpu_out[i]);
// for(int i = 0; i < 10; i++) printf("%6d:\t%.10f\t%.10f\n", N-i, seq_out[N-i], gpu_out[N-i]);
// Free outpus databases
free(in); free(gpu_out); free(seq_out);
return 0;
}
|
07e25505a838c8beff664e90ea4b23cc61e5aee6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <algorithm>
__global__ void kernel(int i, double *a)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
a[idx] = i;
}
int main()
{
auto count = 13;
auto n = 1024;
auto n_bytes = n * sizeof(double);
double *a[count];
hipStream_t streams[count];
for (auto i = 0; i < count; ++i)
{
hipStreamCreate(&streams[i]);
hipMallocManaged( (void **)&a[i], n_bytes );
hipLaunchKernelGGL(( kernel), dim3(1), dim3(1024), 0, streams[i], i, a[i]);
hipDeviceSynchronize();
std::cout << std::accumulate(a[i], a[i] + n, 0.0) << std::endl;
hipStreamDestroy(streams[i]);
hipFree(a[i]);
}
return 0;
}
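// Expected output: each of the 13 iterations prints 1024 * i (the kernel writes i into all 1024
// elements before the accumulate), i.e. 0, 1024, 2048, ..., 12288.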
| 07e25505a838c8beff664e90ea4b23cc61e5aee6.cu | #include <iostream>
#include <algorithm>
#include <numeric> // std::accumulate
__global__ void kernel(int i, double *a)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
a[idx] = i;
}
int main()
{
auto count = 13;
auto n = 1024;
auto n_bytes = n * sizeof(double);
double *a[count];
cudaStream_t streams[count];
for (auto i = 0; i < count; ++i)
{
cudaStreamCreate(&streams[i]);
cudaMallocManaged( (void **)&a[i], n_bytes );
kernel<<<1, 1024, 0, streams[i]>>>(i, a[i]);
cudaDeviceSynchronize();
std::cout << std::accumulate(a[i], a[i] + n, 0.0) << std::endl;
cudaStreamDestroy(streams[i]);
cudaFree(a[i]);
}
return 0;
}
|
f2dcfdb6ef5afc54433159dfeff04e721d60ccff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include "matrix.h"
#include "rocblas.h"
#include "helper_cuda.h"
#define BLOCK_THREAD_DIM 32
__global__ void generate_dct_matrix_coefficients(double *A, double *AT, double N);
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]){
/* Declare all variables.*/
mxGPUArray *imageArray, *result;
double *d_imageArray, *d_result;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
mxInitGPU();
if ((nrhs < 2) || !(mxIsGPUArray(prhs[0])) ) {
mexErrMsgIdAndTxt(errId, errMsg);
}
imageArray = mxGPUCopyFromMxArray(prhs[0]);
int N = mxGetScalar(prhs[1]);
/*
if ((mxGPUGetClassID(imageArray) != mxDOUBLE_CLASS) ) {
mexErrMsgIdAndTxt(errId, errMsg);
}*/
d_imageArray = (double *)(mxGPUGetData(imageArray));
/* Create a GPUArray to hold the result and get its underlying pointer. */
result = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(imageArray),
mxGPUGetDimensions(imageArray),
mxGPUGetClassID(imageArray),
mxGPUGetComplexity(imageArray),
MX_GPU_INITIALIZE_VALUES );
d_result = (double *)(mxGPUGetData(result));
double * d_A, *d_AT, *d_C;
hipMalloc((void**)&d_A, N*N*sizeof(double));
hipMalloc((void**)&d_AT, N*N*sizeof(double));
hipMalloc((void**)&d_C, N*N*sizeof(double));
const double C_beta = 0.0;
const double alpha = 1.0;
// YOUR CODE HERE
hipError_t status;
hipblasStatus_t cublas_status;
// ---------------------------------- CUBLAS initialization ---------------------------------------
hipblasHandle_t cublas_handle;
hipblasCreate(&cublas_handle);
dim3 blocks(BLOCK_THREAD_DIM, BLOCK_THREAD_DIM);
dim3 grid(N / BLOCK_THREAD_DIM, N / BLOCK_THREAD_DIM);
hipLaunchKernelGGL(( generate_dct_matrix_coefficients), dim3(grid), dim3(blocks), 0, 0, d_A, d_AT, N);
hipDeviceSynchronize();
status = hipGetLastError();
if(status != hipSuccess){
mexErrMsgIdAndTxt(errId, "cuda error code %d\n", status);
}
// dct2: AT*X*A
cublas_status = hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, N, N, &alpha, d_AT, N, d_imageArray, N, &C_beta, d_C, N);
if(cublas_status != HIPBLAS_STATUS_SUCCESS){
mexErrMsgIdAndTxt(errId, "cublas error code %d\n", cublas_status);
}
cublas_status = hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, N, N, &alpha, d_C, N, d_A, N, &C_beta, d_result, N);
if(cublas_status != HIPBLAS_STATUS_SUCCESS){
mexErrMsgIdAndTxt(errId, "cublas error code %d\n", cublas_status);
}
// idct2: A*X*AT
cublas_status = hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, N, N, &alpha, d_A, N, d_result, N, &C_beta, d_C, N);
if(cublas_status != HIPBLAS_STATUS_SUCCESS){
mexErrMsgIdAndTxt(errId, "cublas error code %d\n", cublas_status);
}
cublas_status = hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, N, N, &alpha, d_C, N, d_AT, N, &C_beta, d_result, N);
if(cublas_status != HIPBLAS_STATUS_SUCCESS){
mexErrMsgIdAndTxt(errId, "cublas error code %d\n", cublas_status);
}
hipFree(d_A);
hipFree(d_AT);
hipFree(d_C);
hipblasDestroy(cublas_handle);
plhs[0] = mxGPUCreateMxArrayOnGPU(result);
mxGPUDestroyGPUArray(imageArray);
mxGPUDestroyGPUArray(result);
}
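// For reference: the kernel below fills A with the transposed orthonormal DCT-II matrix,
// A(x, y) = sqrt(l_y / N) * cos(pi * (2x + 1) * y / (2N)) with l_0 = 1 and l_y = 2 for y > 0,
// and AT with its transpose, so the GEMM pairs above compute dct2(X) = AT*X*A and
// idct2(Y) = A*Y*AT directly on the column-major MATLAB/cuBLAS layout.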
__global__ void generate_dct_matrix_coefficients(double *A, double *AT, double N){
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
double lx = 1.0 + (1.0)*(x>0);
double ly = 1.0 + (1.0)*(y>0);
// row major order
// A[x + y*N] = cospi((2*x+1)*y/(2*N));
int n = N;
// column major order
AT[x + y*n] = sqrt(lx/N) * cospi((2.0*y+1.0)*x/(2.0*N));
A[x + y*n] = sqrt(ly/N) * cospi((2.0*x+1.0)*y/(2.0*N));
} | f2dcfdb6ef5afc54433159dfeff04e721d60ccff.cu | #include "mex.h"
#include "gpu/mxGPUArray.h"
#include "matrix.h"
#include "cublas_v2.h"
#include "helper_cuda.h"
#define BLOCK_THREAD_DIM 32
__global__ void generate_dct_matrix_coefficients(double *A, double *AT, double N);
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]){
/* Declare all variables.*/
mxGPUArray *imageArray, *result;
double *d_imageArray, *d_result;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
mxInitGPU();
if ((nrhs < 2) || !(mxIsGPUArray(prhs[0])) ) {
mexErrMsgIdAndTxt(errId, errMsg);
}
imageArray = mxGPUCopyFromMxArray(prhs[0]);
int N = mxGetScalar(prhs[1]);
/*
if ((mxGPUGetClassID(imageArray) != mxDOUBLE_CLASS) ) {
mexErrMsgIdAndTxt(errId, errMsg);
}*/
d_imageArray = (double *)(mxGPUGetData(imageArray));
/* Create a GPUArray to hold the result and get its underlying pointer. */
result = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(imageArray),
mxGPUGetDimensions(imageArray),
mxGPUGetClassID(imageArray),
mxGPUGetComplexity(imageArray),
MX_GPU_INITIALIZE_VALUES );
d_result = (double *)(mxGPUGetData(result));
double * d_A, *d_AT, *d_C;
cudaMalloc((void**)&d_A, N*N*sizeof(double));
cudaMalloc((void**)&d_AT, N*N*sizeof(double));
cudaMalloc((void**)&d_C, N*N*sizeof(double));
const double C_beta = 0.0;
const double alpha = 1.0;
// YOUR CODE HERE
cudaError_t status;
cublasStatus_t cublas_status;
// ---------------------------------- CUBLAS initialization ---------------------------------------
cublasHandle_t cublas_handle;
cublasCreate(&cublas_handle);
dim3 blocks(BLOCK_THREAD_DIM, BLOCK_THREAD_DIM);
dim3 grid(N / BLOCK_THREAD_DIM, N / BLOCK_THREAD_DIM);
generate_dct_matrix_coefficients<<<grid, blocks>>>(d_A, d_AT, N);
cudaDeviceSynchronize();
status = cudaGetLastError();
if(status != cudaSuccess){
mexErrMsgIdAndTxt(errId, "cuda error code %d\n", status);
}
// dct2: AT*X*A
cublas_status = cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, N, N, N, &alpha, d_AT, N, d_imageArray, N, &C_beta, d_C, N);
if(cublas_status != CUBLAS_STATUS_SUCCESS){
mexErrMsgIdAndTxt(errId, "cublas error code %d\n", cublas_status);
}
cublas_status = cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, N, N, N, &alpha, d_C, N, d_A, N, &C_beta, d_result, N);
if(cublas_status != CUBLAS_STATUS_SUCCESS){
mexErrMsgIdAndTxt(errId, "cublas error code %d\n", cublas_status);
}
// idct2: A*X*AT
cublas_status = cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, N, N, N, &alpha, d_A, N, d_result, N, &C_beta, d_C, N);
if(cublas_status != CUBLAS_STATUS_SUCCESS){
mexErrMsgIdAndTxt(errId, "cublas error code %d\n", cublas_status);
}
cublas_status = cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, N, N, N, &alpha, d_C, N, d_AT, N, &C_beta, d_result, N);
if(cublas_status != CUBLAS_STATUS_SUCCESS){
mexErrMsgIdAndTxt(errId, "cublas error code %d\n", cublas_status);
}
cudaFree(d_A);
cudaFree(d_AT);
cudaFree(d_C);
cublasDestroy(cublas_handle);
plhs[0] = mxGPUCreateMxArrayOnGPU(result);
mxGPUDestroyGPUArray(imageArray);
mxGPUDestroyGPUArray(result);
}
__global__ void generate_dct_matrix_coefficients(double *A, double *AT, double N){
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
double lx = 1.0 + (1.0)*(x>0);
double ly = 1.0 + (1.0)*(y>0);
// row major order
// A[x + y*N] = cospi((2*x+1)*y/(2*N));
int n = N;
// column major order
AT[x + y*n] = sqrt(lx/N) * cospi((2.0*y+1.0)*x/(2.0*N));
A[x + y*n] = sqrt(ly/N) * cospi((2.0*x+1.0)*y/(2.0*N));
} |
766f0e63afc143abba0087d5c91aa2cd48d65c5d.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2020 Simon Donn, Max Planck Institute for Intelligent Systems, Tuebingen, Germany
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "PermutohedralLatticeGPU.cuh"
#include "DeviceMemoryAllocator.h"
#include <vector>
// for kernels that are actually only implemented in single-precision
// (here because of needing atomicMinf)
#define AT_DISPATCH_SINGLE_FLOAT(TYPE, NAME, ...) \
[&] { \
const at::Type& the_type = TYPE; \
switch (the_type.scalarType()) { \
AT_PRIVATE_CASE_TYPE(at::ScalarType::Float, float, __VA_ARGS__) \
default: \
AT_ERROR(#NAME, " not implemented for '", the_type.toString(), "'"); \
} \
}()
template <typename scalar_t>
__inline__ __device__ scalar_t TOME_get_point_depth(scalar_t* __restrict__ camera, scalar_t* __restrict__ win) {
return camera[8]*win[0] + camera[9]*win[1] + camera[10]*win[2]+ camera[11];
}
template <typename scalar_t>
__inline__ __device__ bool TOME_project_point(scalar_t* __restrict__ camera, scalar_t* __restrict__ win, int *out, int input_width, int input_height) {
scalar_t cx = camera[0]*win[0] + camera[1]*win[1] + camera[2]*win[2] + camera[3];
scalar_t cy = camera[4]*win[0] + camera[5]*win[1] + camera[6]*win[2] + camera[7];
scalar_t cz = TOME_get_point_depth(camera, win);
out[0] = int(cx / cz + 0.5f);
out[1] = int(cy / cz + 0.5f);
return (out[0] >= 0) && (out[1] >= 0) && (out[0]<input_width) && (out[1]<input_height);
}
template <typename scalar_t>
__inline__ __device__ bool TOME_project_pointf(scalar_t* __restrict__ camera, scalar_t* __restrict__ win, scalar_t* __restrict__ out, int input_width, int input_height) {
scalar_t cx = camera[0]*win[0] + camera[1]*win[1] + camera[2]*win[2] + camera[3];
scalar_t cy = camera[4]*win[0] + camera[5]*win[1] + camera[6]*win[2] + camera[7];
scalar_t cz = TOME_get_point_depth(camera, win);
out[0] = cx / cz;
out[1] = cy / cz;
return (out[0] >= 0) && (out[1] >= 0) && (out[0]<=input_width-1.0f) && (out[1]<=input_height-1.0f);
}
template <typename scalar_t>
__inline__ __device__ void TOME_unproject_point(scalar_t* __restrict__ camloc, scalar_t* __restrict__ invKR, int u, int v, scalar_t z, scalar_t* __restrict__ out) {
out[0] = camloc[0] + (invKR[0] * u + invKR[1] * v + invKR[2]) * z;
out[1] = camloc[1] + (invKR[3] * u + invKR[4] * v + invKR[5]) * z;
out[2] = camloc[2] + (invKR[6] * u + invKR[7] * v + invKR[8]) * z;
}
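// CUDA/HIP has no native atomicMin for float. For non-negative IEEE-754 values the bit
// pattern increases monotonically with the value, so an integer atomicMin on the
// reinterpreted bits yields the float minimum; for negative values the raw-bit ordering is
// reversed, which is why atomicMax on the unsigned reinterpretation is used instead.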
__device__ static float TOME_atomicMinf(float* addr, float val)
{
float old;
old = (val >= 0) ? __int_as_float(atomicMin((int *)addr, __float_as_int(val))) :
__uint_as_float(atomicMax((unsigned int *)addr, __float_as_uint(val)));
return old;
}
// input depth: BxHxW depth tensor
// output depth: BxKxHxW depth tensor
// cameras: BxKx3x4 tensor (receiving cameras)
// invKRs: Bx3x3 tensor (central camera)
// camlocs: Bx3x1 tensor (central camera)
template <typename scalar_t>
__global__ void depth_reprojection_cuda_kernel(
scalar_t* __restrict__ input,
scalar_t* __restrict__ output,
scalar_t* __restrict__ cameras,
scalar_t* __restrict__ invKRs,
scalar_t* __restrict__ camlocs,
int B,
int K,
int inH, int inW,
int outH, int outW)
{
int proj[2];
scalar_t wloc[3];
for (int b = blockIdx.x * blockDim.x + threadIdx.x; b < B; b += blockDim.x * gridDim.x) {
scalar_t* camloc = camlocs + b * 3;
scalar_t* invKR = invKRs + b * 9;
for (int h = blockIdx.y * blockDim.y + threadIdx.y; h < inH; h += blockDim.y * gridDim.y) {
for (int w = blockIdx.z * blockDim.z + threadIdx.z; w < inW; w += blockDim.z * gridDim.z) {
// cast this point into space
scalar_t depth = input[b * inH * inW + h * inW + w];
if(depth > 0) {
for (int k = 0; k < K; k++) {
scalar_t* camera = cameras + b * K * 12 + k * 12;
TOME_unproject_point(camloc, invKR, w, h, depth, wloc);
// project it into receiving view k
if(TOME_project_point(camera, wloc, proj, outW, outH)) {
TOME_atomicMinf(
output + b * K * outH * outW + k * outH * outW + proj[1] * outW + proj[0],
TOME_get_point_depth(camera, wloc)
);
}
}
}
}
}
}
}
at::Tensor depth_reprojection_cuda(
at::Tensor input_depth,
at::Tensor cameras,
at::Tensor invKR,
at::Tensor camloc,
int outH,
int outW) {
auto blkdim = 16;
const auto B = cameras.size(0);
const auto K = cameras.size(1);
const auto inH = input_depth.size(1);
const auto inW = input_depth.size(2);
const dim3 block = dim3(1, blkdim, blkdim);
const dim3 grid = dim3(1, 8, 8);
auto output_depth = at::zeros({B, K, outH, outW}, input_depth.type());
auto sentinel = 1e9;
output_depth.fill_(sentinel);
if(input_depth.type().scalarType() == at::ScalarType::Float) {
hipLaunchKernelGGL(( depth_reprojection_cuda_kernel<float>), dim3(grid), dim3(block), 0, 0,
input_depth.data<float>(),
output_depth.data<float>(),
cameras.data<float>(),
invKR.data<float>(),
camloc.data<float>(),
B, K, inH, inW, outH, outW);
}
else{
AT_ERROR("depth_reprojection_cuda not implemented for '", input_depth.type().toString(), "'");
}
output_depth.fmod_(sentinel);
return output_depth;
}
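// A minimal host-side usage sketch for the function above (hypothetical sizes and tensor names;
// the random tensors only illustrate the expected shapes, real inputs come from calibrated views):
// const int B = 1, K = 2, H = 240, W = 320;
// auto depth = at::rand({B, H, W}, at::kFloat).cuda(); // central-view depth map
// auto cams = at::rand({B, K, 3, 4}, at::kFloat).cuda(); // receiving cameras
// auto invKR = at::rand({B, 3, 3}, at::kFloat).cuda(); // central camera inverse(K*R)
// auto camloc = at::rand({B, 3, 1}, at::kFloat).cuda(); // central camera location
// auto reproj = depth_reprojection_cuda(depth, cams, invKR, camloc, H, W); // B x K x H x W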
// input depth: BxinHxinW depth tensor
// output depth: BxKxoutHxoutW depth tensor
// cameras: Bx3x4 tensor (central camera)
// invKRs: BxKx3x3 tensor (receiving cameras)
// camlocs: BxKx3x1 tensor (receiving cameras)
template <typename scalar_t>
__global__ void depth_reprojection_bound_cuda_kernel(
scalar_t *input,
scalar_t *output,
scalar_t *cameras,
scalar_t *invKRs,
scalar_t *camlocs,
int B,
int K,
int inH,
int inW,
int outH,
int outW,
scalar_t dmin,
scalar_t dmax,
scalar_t dstep)
{
int proj[2];
scalar_t wloc[3];
for (int b = blockIdx.x * blockDim.x + threadIdx.x; b < B; b += blockDim.x * gridDim.x) {
for (int k = 0; k < K; k++) {
scalar_t* camloc = camlocs + b * K * 3 + k * 3;
scalar_t* invKR = invKRs + b * K * 9 + k * 9;
scalar_t *camera = cameras + b * 12;
for (int h = blockIdx.y * blockDim.y + threadIdx.y; h < outH; h += blockDim.y * gridDim.y) {
for (int w = blockIdx.z * blockDim.z + threadIdx.z; w < outW; w += blockDim.z * gridDim.z) {
// cast this point into space at increasingly large depths (from camera 0)
// the first depth at which it is invisible in view n (i.e. lies behind its depth map)
// that is the lowest permissible depth for this pixel according to that view
// for very sharp depth edges, this results in an interpolation of the depth map
// for aliased reprojections, this results in a filling of the holes
// bool projected_in = false;
scalar_t dhyp = dmin;
for (; dhyp <= dmax; dhyp += dstep) {
TOME_unproject_point(camloc, invKR, w, h, dhyp, wloc);
// project it onto the first camera again
if(TOME_project_point(camera, wloc, proj, inW, inH)) {
// projected_in = true;
scalar_t dhyp_depth_n = TOME_get_point_depth(camera, wloc);
scalar_t depth_n = input[b * inH * inW + proj[1] * inW + proj[0]];
if(dhyp_depth_n > depth_n && depth_n > 0) {
break;
}
}
// else if (projected_in) {
// // just give up -- no value here is acceptable
// // dhyp = dmax;
// break;
// }
}
if(dhyp < dmax) {
// refine the estimate
scalar_t ndhyp = dhyp;
for (; ndhyp >= dhyp - dstep; ndhyp -= dstep/10) {
TOME_unproject_point(camloc, invKR, w, h, ndhyp, wloc);
// project it onto the first camera again
if(TOME_project_point(camera, wloc, proj, inW, inH)) {
scalar_t dhyp_depth_n = TOME_get_point_depth(camera, wloc);
scalar_t depth_n = input[b * inH * inW + proj[1] * inW + proj[0]];
if(dhyp_depth_n < depth_n) {
break;
}
}
else {
break;
}
}
dhyp = ndhyp;
for (; ndhyp < dhyp + dstep/10; ndhyp += dstep/50) {
TOME_unproject_point(camloc, invKR, w, h, ndhyp, wloc);
// project it onto the first camera again
if(TOME_project_point(camera, wloc, proj, inW, inH)) {
scalar_t dhyp_depth_n = TOME_get_point_depth(camera, wloc);
scalar_t depth_n = input[b * inH * inW + proj[1] * inW + proj[0]];
if(dhyp_depth_n > depth_n && depth_n > 0) {
break;
}
}
else {
break;
}
}
dhyp = ndhyp;
}
else {
dhyp = 0.0f;
}
output[b * K * outH * outW + k * outH * outW + h * outW + w] = dhyp;
}
}
}
}
}
at::Tensor depth_reprojection_bound_cuda(
at::Tensor input_depth,
at::Tensor cameras,
at::Tensor invKR,
at::Tensor camloc,
int outH, int outW,
float dmin,
float dmax,
float dstep) {
auto blkdim = 16;
const auto B = invKR.size(0);
const auto K = invKR.size(1);
const auto inH = input_depth.size(1);
const auto inW = input_depth.size(2);
const dim3 block = dim3(1, blkdim, blkdim);
const dim3 grid = dim3(1, 8, 8);
auto output_depth = at::zeros({B, K, outH, outW}, input_depth.type());
auto sentinel = 1e9;
output_depth.fill_(sentinel);
if(input_depth.type().scalarType() == at::ScalarType::Float) {
hipLaunchKernelGGL(( depth_reprojection_bound_cuda_kernel<float>), dim3(grid), dim3(block), 0, 0,
input_depth.data<float>(),
output_depth.data<float>(),
cameras.data<float>(),
invKR.data<float>(),
camloc.data<float>(),
B, K, inH, inW, outH, outW, dmin, dmax, dstep);
}
else{
AT_ERROR("depth_reprojection_bound_cuda_kernel not implemented for '", input_depth.type().toString(), "'");
}
output_depth.fmod_(sentinel);
return output_depth;
}
// input depth: BxHxW depth tensor
// output depth: BxKxHxW depth tensor
// cameras: BxKx3x4 tensor (receiving cameras)
// invKRs: Bx3x3 tensor (central camera)
// camlocs: Bx3x1 tensor (central camera)
template <typename scalar_t>
__global__ void depth_reprojection_splat_cuda_kernel(
scalar_t* __restrict__ input,
scalar_t* __restrict__ output_depth,
scalar_t* __restrict__ output_weights,
scalar_t* __restrict__ cameras,
scalar_t* __restrict__ invKRs,
scalar_t* __restrict__ camlocs,
scalar_t radius,
scalar_t depth_scale,
int B,
int K,
int inH, int inW,
int outH, int outW)
{
scalar_t proj[2];
scalar_t wloc[3];
// twice the stddev: 95% of the mass
int iradius = int(ceil(2*radius));
scalar_t expdiv = radius>0?2*radius*radius:1.0;
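// Gaussian splat: mass_k = exp(-d^2 / (2*radius^2)), truncated to a +/- iradius (2*radius)
// window around the projected point.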
for (int b = blockIdx.x * blockDim.x + threadIdx.x; b < B; b += blockDim.x * gridDim.x) {
scalar_t* camloc = camlocs + b * 3;
scalar_t* invKR = invKRs + b * 9;
for (int h = blockIdx.y * blockDim.y + threadIdx.y; h < inH; h += blockDim.y * gridDim.y) {
for (int w = blockIdx.z * blockDim.z + threadIdx.z; w < inW; w += blockDim.z * gridDim.z) {
// cast this point into space
scalar_t depth = input[b * inH * inW + h * inW + w];
if(depth > 0) {
TOME_unproject_point(camloc, invKR, w, h, depth, wloc);
for (int k = 0; k < K; k++) {
scalar_t* camera = cameras + b * K * 12 + k * 12;
TOME_project_pointf(camera, wloc, proj, outW, outH);
scalar_t depth_k = TOME_get_point_depth(camera, wloc);
int px = int(floor(proj[0]+0.5f));
int py = int(floor(proj[1]+0.5f));
for(int xk = max(0, px - iradius); xk <= min(px + iradius, outW-1); xk++) {
for(int yk = max(0, py - iradius); yk <= min(py + iradius, outH-1); yk++) {
scalar_t dist_k = (xk-proj[0])*(xk-proj[0]) + (yk-proj[1])*(yk-proj[1]);
// mass: what fraction of the blob in this pixel
scalar_t mass_k = exp(-dist_k / expdiv);
// weight: softmaxing depth in this pixel
scalar_t weight_k = exp(-depth_k / depth_scale);
atomicAdd(
output_depth + b * K * outH * outW + k * outH * outW + yk * outW + xk,
depth_k * mass_k * weight_k
);
atomicAdd(
output_weights + b * K * outH * outW + k * outH * outW + yk * outW + xk,
mass_k * weight_k
);
}
}
}
}
}
}
}
}
template <typename scalar_t>
__global__ void depth_reprojection_splat_visibilities_cuda_kernel(
scalar_t* __restrict__ input,
scalar_t* __restrict__ output_depth,
scalar_t* __restrict__ output_visibilities,
scalar_t* __restrict__ cameras,
scalar_t* __restrict__ invKRs,
scalar_t* __restrict__ camlocs,
scalar_t radius,
scalar_t depth_scale,
int B,
int K,
int inH, int inW,
int outH, int outW)
{
scalar_t proj[2];
scalar_t wloc[3];
// twice the stddev: 95% of the mass
int iradius = int(ceil(2*radius));
scalar_t expdiv = radius>0?2*radius*radius:1.0;
for (int b = blockIdx.x * blockDim.x + threadIdx.x; b < B; b += blockDim.x * gridDim.x) {
scalar_t* camloc = camlocs + b * 3;
scalar_t* invKR = invKRs + b * 9;
for (int h = blockIdx.y * blockDim.y + threadIdx.y; h < inH; h += blockDim.y * gridDim.y) {
for (int w = blockIdx.z * blockDim.z + threadIdx.z; w < inW; w += blockDim.z * gridDim.z) {
// cast this point into space
scalar_t depth = input[b * inH * inW + h * inW + w];
if(depth > 0) {
TOME_unproject_point(camloc, invKR, w, h, depth, wloc);
for (int k = 0; k < K; k++) {
scalar_t* camera = cameras + b * K * 12 + k * 12;
TOME_project_pointf(camera, wloc, proj, outW, outH);
scalar_t depth_k = TOME_get_point_depth(camera, wloc);
scalar_t visiblemass_sum = 0;
scalar_t mass_sum = 0;
int px = int(floor(proj[0]+0.5f));
int py = int(floor(proj[1]+0.5f));
for(int xk = max(0, px - iradius); xk <= min(px + iradius, outW-1); xk++) {
for(int yk = max(0, py - iradius); yk <= min(py + iradius, outH-1); yk++) {
scalar_t dist_k = (xk-proj[0])*(xk-proj[0]) + (yk-proj[1])*(yk-proj[1]);
// mass: what fraction of the blob in this pixel
scalar_t mass_k = exp(-dist_k / expdiv);
scalar_t zbuffer_k = output_depth[b * K * outH * outW + k * outH * outW + yk * outW + xk];
// weight: softmaxing depth in this pixel
scalar_t visibility_k = exp((zbuffer_k - depth_k) / depth_scale);
visibility_k = min(visibility_k, 1.0);
visiblemass_sum += mass_k * visibility_k;
mass_sum += mass_k;
}
}
if(mass_sum > 0) {
output_visibilities[
b * K * inH * inW + k * inH * inW + h * inW + w
] = visiblemass_sum / mass_sum;
}
}
}
}
}
}
}
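// Splatting pipeline driven from the host function below: (1) splat every input depth sample
// into the K output views as a Gaussian blob, accumulating depth*mass*weight and mass*weight;
// (2) normalize the accumulated depth by the accumulated weights (the div_ call); (3) re-project
// the input samples against the resulting z-buffer to estimate per-pixel visibilities.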
std::vector<at::Tensor> depth_reprojection_splat_cuda(
at::Tensor input_depth,
at::Tensor cameras,
at::Tensor invKR,
at::Tensor camloc,
float radius,
float zbuffer_scale,
float visibility_scale,
int outH,
int outW) {
auto blkdim = 16;
const auto B = cameras.size(0);
const auto K = cameras.size(1);
const auto inH = input_depth.size(1);
const auto inW = input_depth.size(2);
const dim3 block = dim3(1, blkdim, blkdim);
const dim3 grid = dim3(1, 8, 8);
auto output_depth = at::zeros({B, K, outH, outW}, input_depth.type());
auto output_weights = at::zeros({B, K, outH, outW}, input_depth.type());
auto output_visibilities = at::zeros({B, K, inH, inW}, input_depth.type());
if(input_depth.type().scalarType() == at::ScalarType::Float) {
hipLaunchKernelGGL(( depth_reprojection_splat_cuda_kernel<float>), dim3(grid), dim3(block), 0, 0,
input_depth.data<float>(),
output_depth.data<float>(),
output_weights.data<float>(),
cameras.data<float>(),
invKR.data<float>(),
camloc.data<float>(),
radius, zbuffer_scale,
B, K, inH, inW, outH, outW);
output_depth.div_(output_weights);
hipLaunchKernelGGL(( depth_reprojection_splat_visibilities_cuda_kernel<float>), dim3(grid), dim3(block), 0, 0,
input_depth.data<float>(),
output_depth.data<float>(),
output_visibilities.data<float>(),
cameras.data<float>(),
invKR.data<float>(),
camloc.data<float>(),
radius, visibility_scale,
B, K, inH, inW, outH, outW);
}
else{
AT_ERROR("depth_reprojection_splat_cuda not implemented for '", input_depth.type().toString(), "'");
}
return {output_depth, output_weights, output_visibilities};
}
at::Tensor permutohedral_filter_cuda(
at::Tensor input,
at::Tensor positions,
at::Tensor weights,
bool reverse
) {
auto blkdim = 16;
const auto H = input.size(0);
const auto W = input.size(1);
const auto num_pixels = H*W;
const dim3 block = dim3(1, blkdim, blkdim);
const dim3 grid = dim3(1, 8, 8);
const auto pd = positions.size(2);
const auto id = input.size(2);
auto output = at::zeros({H, W, id}, input.type());
auto allocator = DeviceMemoryAllocator();
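// The lattice is instantiated as PermutohedralLatticeGPU<float, pd, id + 1>: the extra value
// channel is (presumably) the homogeneous weight used for normalization, hence the "+1" in
// every branch below.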
if(input.type().scalarType() == at::ScalarType::Float) {
if(pd == 5 && id == 3) {
auto lattice = PermutohedralLatticeGPU<float, 5, 4>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 2 && id == 3) {
auto lattice = PermutohedralLatticeGPU<float, 2, 4>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 2 && id == 2) {
auto lattice = PermutohedralLatticeGPU<float, 2, 3>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 2 && id == 1) {
auto lattice = PermutohedralLatticeGPU<float, 2, 2>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 3 && id == 1) {
auto lattice = PermutohedralLatticeGPU<float, 3, 2>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 3 && id == 2) {
auto lattice = PermutohedralLatticeGPU<float, 3, 3>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 3 && id == 3) {
auto lattice = PermutohedralLatticeGPU<float, 3, 4>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 6 && id == 2) {
auto lattice = PermutohedralLatticeGPU<float, 6, 3>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 6 && id == 3) {
auto lattice = PermutohedralLatticeGPU<float, 6, 4>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 6 && id == 4) {
auto lattice = PermutohedralLatticeGPU<float, 6, 5>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 6 && id == 5) {
auto lattice = PermutohedralLatticeGPU<float, 6, 6>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 6 && id == 6) {
auto lattice = PermutohedralLatticeGPU<float, 6, 7>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 6 && id == 7) {
auto lattice = PermutohedralLatticeGPU<float, 6, 8>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 6 && id == 8) {
auto lattice = PermutohedralLatticeGPU<float, 6, 9>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else{
AT_ASSERTM(false, "permutohedral filter: this (pd,id) is not present in the compiled binary");
}
}
else{
AT_ERROR("permutohedral_filter_cuda not implemented for '", input.type().toString(), "'");
}
return output;
} | 766f0e63afc143abba0087d5c91aa2cd48d65c5d.cu | /*
Copyright (c) 2020 Simon Donné, Max Planck Institute for Intelligent Systems, Tuebingen, Germany
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "PermutohedralLatticeGPU.cuh"
#include "DeviceMemoryAllocator.h"
#include <vector>
// for kernels that are actually only implemented in single-precision
// (here because of needing atomicMinf)
#define AT_DISPATCH_SINGLE_FLOAT(TYPE, NAME, ...) \
[&] { \
const at::Type& the_type = TYPE; \
switch (the_type.scalarType()) { \
AT_PRIVATE_CASE_TYPE(at::ScalarType::Float, float, __VA_ARGS__) \
default: \
AT_ERROR(#NAME, " not implemented for '", the_type.toString(), "'"); \
} \
}()
template <typename scalar_t>
__inline__ __device__ scalar_t TOME_get_point_depth(scalar_t* __restrict__ camera, scalar_t* __restrict__ win) {
return camera[8]*win[0] + camera[9]*win[1] + camera[10]*win[2]+ camera[11];
}
template <typename scalar_t>
__inline__ __device__ bool TOME_project_point(scalar_t* __restrict__ camera, scalar_t* __restrict__ win, int *out, int input_width, int input_height) {
scalar_t cx = camera[0]*win[0] + camera[1]*win[1] + camera[2]*win[2] + camera[3];
scalar_t cy = camera[4]*win[0] + camera[5]*win[1] + camera[6]*win[2] + camera[7];
scalar_t cz = TOME_get_point_depth(camera, win);
out[0] = int(cx / cz + 0.5f);
out[1] = int(cy / cz + 0.5f);
return (out[0] >= 0) && (out[1] >= 0) && (out[0]<input_width) && (out[1]<input_height);
}
template <typename scalar_t>
__inline__ __device__ bool TOME_project_pointf(scalar_t* __restrict__ camera, scalar_t* __restrict__ win, scalar_t* __restrict__ out, int input_width, int input_height) {
scalar_t cx = camera[0]*win[0] + camera[1]*win[1] + camera[2]*win[2] + camera[3];
scalar_t cy = camera[4]*win[0] + camera[5]*win[1] + camera[6]*win[2] + camera[7];
scalar_t cz = TOME_get_point_depth(camera, win);
out[0] = cx / cz;
out[1] = cy / cz;
return (out[0] >= 0) && (out[1] >= 0) && (out[0]<=input_width-1.0f) && (out[1]<=input_height-1.0f);
}
template <typename scalar_t>
__inline__ __device__ void TOME_unproject_point(scalar_t* __restrict__ camloc, scalar_t* __restrict__ invKR, int u, int v, scalar_t z, scalar_t* __restrict__ out) {
out[0] = camloc[0] + (invKR[0] * u + invKR[1] * v + invKR[2]) * z;
out[1] = camloc[1] + (invKR[3] * u + invKR[4] * v + invKR[5]) * z;
out[2] = camloc[2] + (invKR[6] * u + invKR[7] * v + invKR[8]) * z;
}
__device__ static float TOME_atomicMinf(float* addr, float val)
{
float old;
old = (val >= 0) ? __int_as_float(atomicMin((int *)addr, __float_as_int(val))) :
__uint_as_float(atomicMax((unsigned int *)addr, __float_as_uint(val)));
return old;
}
// input depth: BxHxW depth tensor
// output depth: BxKxHxW depth tensor
// cameras: BxKx3x4 tensor (receiving cameras)
// invKRs: Bx3x3 tensor (central camera)
// camlocs: Bx3x1 tensor (central camera)
template <typename scalar_t>
__global__ void depth_reprojection_cuda_kernel(
scalar_t* __restrict__ input,
scalar_t* __restrict__ output,
scalar_t* __restrict__ cameras,
scalar_t* __restrict__ invKRs,
scalar_t* __restrict__ camlocs,
int B,
int K,
int inH, int inW,
int outH, int outW)
{
int proj[2];
scalar_t wloc[3];
for (int b = blockIdx.x * blockDim.x + threadIdx.x; b < B; b += blockDim.x * gridDim.x) {
scalar_t* camloc = camlocs + b * 3;
scalar_t* invKR = invKRs + b * 9;
for (int h = blockIdx.y * blockDim.y + threadIdx.y; h < inH; h += blockDim.y * gridDim.y) {
for (int w = blockIdx.z * blockDim.z + threadIdx.z; w < inW; w += blockDim.z * gridDim.z) {
// cast this point into space
scalar_t depth = input[b * inH * inW + h * inW + w];
if(depth > 0) {
for (int k = 0; k < K; k++) {
scalar_t* camera = cameras + b * K * 12 + k * 12;
TOME_unproject_point(camloc, invKR, w, h, depth, wloc);
// project it into receiving view k
if(TOME_project_point(camera, wloc, proj, outW, outH)) {
TOME_atomicMinf(
output + b * K * outH * outW + k * outH * outW + proj[1] * outW + proj[0],
TOME_get_point_depth(camera, wloc)
);
}
}
}
}
}
}
}
at::Tensor depth_reprojection_cuda(
at::Tensor input_depth,
at::Tensor cameras,
at::Tensor invKR,
at::Tensor camloc,
int outH,
int outW) {
auto blkdim = 16;
const auto B = cameras.size(0);
const auto K = cameras.size(1);
const auto inH = input_depth.size(1);
const auto inW = input_depth.size(2);
const dim3 block = dim3(1, blkdim, blkdim);
const dim3 grid = dim3(1, 8, 8);
auto output_depth = at::zeros({B, K, outH, outW}, input_depth.type());
auto sentinel = 1e9;
output_depth.fill_(sentinel);
if(input_depth.type().scalarType() == at::ScalarType::Float) {
depth_reprojection_cuda_kernel<float><<<grid, block>>>(
input_depth.data<float>(),
output_depth.data<float>(),
cameras.data<float>(),
invKR.data<float>(),
camloc.data<float>(),
B, K, inH, inW, outH, outW);
}
else{
AT_ERROR("depth_reprojection_cuda not implemented for '", input_depth.type().toString(), "'");
}
output_depth.fmod_(sentinel);
return output_depth;
}
// input depth: BxinHxinW depth tensor
// output depth: BxKxoutHxoutW depth tensor
// cameras: Bx3x4 tensor (central camera)
// invKRs: BxKx3x3 tensor (receiving cameras)
// camlocs: BxKx3x1 tensor (receiving cameras)
template <typename scalar_t>
__global__ void depth_reprojection_bound_cuda_kernel(
scalar_t *input,
scalar_t *output,
scalar_t *cameras,
scalar_t *invKRs,
scalar_t *camlocs,
int B,
int K,
int inH,
int inW,
int outH,
int outW,
scalar_t dmin,
scalar_t dmax,
scalar_t dstep)
{
int proj[2];
scalar_t wloc[3];
for (int b = blockIdx.x * blockDim.x + threadIdx.x; b < B; b += blockDim.x * gridDim.x) {
for (int k = 0; k < K; k++) {
scalar_t* camloc = camlocs + b * K * 3 + k * 3;
scalar_t* invKR = invKRs + b * K * 9 + k * 9;
scalar_t *camera = cameras + b * 12;
for (int h = blockIdx.y * blockDim.y + threadIdx.y; h < outH; h += blockDim.y * gridDim.y) {
for (int w = blockIdx.z * blockDim.z + threadIdx.z; w < outW; w += blockDim.z * gridDim.z) {
// cast this point into space at increasingly large depths (from camera 0)
// the first depth at which it is invisible in view n (i.e. lies behind its depth map)
// that is the lowest permissible depth for this pixel according to that view
// for very sharp depth edges, this results in an interpolation of the depth map
// for aliased reprojections, this results in a filling of the holes
// bool projected_in = false;
scalar_t dhyp = dmin;
for (; dhyp <= dmax; dhyp += dstep) {
TOME_unproject_point(camloc, invKR, w, h, dhyp, wloc);
// project it onto the first camera again
if(TOME_project_point(camera, wloc, proj, inW, inH)) {
// projected_in = true;
scalar_t dhyp_depth_n = TOME_get_point_depth(camera, wloc);
scalar_t depth_n = input[b * inH * inW + proj[1] * inW + proj[0]];
if(dhyp_depth_n > depth_n && depth_n > 0) {
break;
}
}
// else if (projected_in) {
// // just give up -- no value here is acceptable
// // dhyp = dmax;
// break;
// }
}
if(dhyp < dmax) {
// refine the estimate
scalar_t ndhyp = dhyp;
for (; ndhyp >= dhyp - dstep; ndhyp -= dstep/10) {
TOME_unproject_point(camloc, invKR, w, h, ndhyp, wloc);
// project it onto the first camera again
if(TOME_project_point(camera, wloc, proj, inW, inH)) {
scalar_t dhyp_depth_n = TOME_get_point_depth(camera, wloc);
scalar_t depth_n = input[b * inH * inW + proj[1] * inW + proj[0]];
if(dhyp_depth_n < depth_n) {
break;
}
}
else {
break;
}
}
dhyp = ndhyp;
for (; ndhyp < dhyp + dstep/10; ndhyp += dstep/50) {
TOME_unproject_point(camloc, invKR, w, h, ndhyp, wloc);
// project it onto the first camera again
if(TOME_project_point(camera, wloc, proj, inW, inH)) {
scalar_t dhyp_depth_n = TOME_get_point_depth(camera, wloc);
scalar_t depth_n = input[b * inH * inW + proj[1] * inW + proj[0]];
if(dhyp_depth_n > depth_n && depth_n > 0) {
break;
}
}
else {
break;
}
}
dhyp = ndhyp;
}
else {
dhyp = 0.0f;
}
output[b * K * outH * outW + k * outH * outW + h * outW + w] = dhyp;
}
}
}
}
}
at::Tensor depth_reprojection_bound_cuda(
at::Tensor input_depth,
at::Tensor cameras,
at::Tensor invKR,
at::Tensor camloc,
int outH, int outW,
float dmin,
float dmax,
float dstep) {
auto blkdim = 16;
const auto B = invKR.size(0);
const auto K = invKR.size(1);
const auto inH = input_depth.size(1);
const auto inW = input_depth.size(2);
const dim3 block = dim3(1, blkdim, blkdim);
const dim3 grid = dim3(1, 8, 8);
auto output_depth = at::zeros({B, K, outH, outW}, input_depth.type());
auto sentinel = 1e9;
output_depth.fill_(sentinel);
if(input_depth.type().scalarType() == at::ScalarType::Float) {
depth_reprojection_bound_cuda_kernel<float><<<grid, block>>>(
input_depth.data<float>(),
output_depth.data<float>(),
cameras.data<float>(),
invKR.data<float>(),
camloc.data<float>(),
B, K, inH, inW, outH, outW, dmin, dmax, dstep);
}
else{
AT_ERROR("depth_reprojection_bound_cuda_kernel not implemented for '", input_depth.type().toString(), "'");
}
output_depth.fmod_(sentinel);
return output_depth;
}
// input depth: BxHxW depth tensor
// output depth: BxKxHxW depth tensor
// cameras: BxKx3x4 tensor (receiving cameras)
// invKRs: Bx3x3 tensor (central camera)
// camlocs: Bx3x1 tensor (central camera)
template <typename scalar_t>
__global__ void depth_reprojection_splat_cuda_kernel(
scalar_t* __restrict__ input,
scalar_t* __restrict__ output_depth,
scalar_t* __restrict__ output_weights,
scalar_t* __restrict__ cameras,
scalar_t* __restrict__ invKRs,
scalar_t* __restrict__ camlocs,
scalar_t radius,
scalar_t depth_scale,
int B,
int K,
int inH, int inW,
int outH, int outW)
{
scalar_t proj[2];
scalar_t wloc[3];
// twice the stddev: 95% of the mass
int iradius = int(ceil(2*radius));
scalar_t expdiv = radius>0?2*radius*radius:1.0;
for (int b = blockIdx.x * blockDim.x + threadIdx.x; b < B; b += blockDim.x * gridDim.x) {
scalar_t* camloc = camlocs + b * 3;
scalar_t* invKR = invKRs + b * 9;
for (int h = blockIdx.y * blockDim.y + threadIdx.y; h < inH; h += blockDim.y * gridDim.y) {
for (int w = blockIdx.z * blockDim.z + threadIdx.z; w < inW; w += blockDim.z * gridDim.z) {
// cast this point into space
scalar_t depth = input[b * inH * inW + h * inW + w];
if(depth > 0) {
TOME_unproject_point(camloc, invKR, w, h, depth, wloc);
for (int k = 0; k < K; k++) {
scalar_t* camera = cameras + b * K * 12 + k * 12;
TOME_project_pointf(camera, wloc, proj, outW, outH);
scalar_t depth_k = TOME_get_point_depth(camera, wloc);
int px = int(floor(proj[0]+0.5f));
int py = int(floor(proj[1]+0.5f));
for(int xk = max(0, px - iradius); xk <= min(px + iradius, outW-1); xk++) {
for(int yk = max(0, py - iradius); yk <= min(py + iradius, outH-1); yk++) {
scalar_t dist_k = (xk-proj[0])*(xk-proj[0]) + (yk-proj[1])*(yk-proj[1]);
// mass: what fraction of the blob in this pixel
scalar_t mass_k = exp(-dist_k / expdiv);
// weight: softmaxing depth in this pixel
scalar_t weight_k = exp(-depth_k / depth_scale);
atomicAdd(
output_depth + b * K * outH * outW + k * outH * outW + yk * outW + xk,
depth_k * mass_k * weight_k
);
atomicAdd(
output_weights + b * K * outH * outW + k * outH * outW + yk * outW + xk,
mass_k * weight_k
);
}
}
}
}
}
}
}
}
template <typename scalar_t>
__global__ void depth_reprojection_splat_visibilities_cuda_kernel(
scalar_t* __restrict__ input,
scalar_t* __restrict__ output_depth,
scalar_t* __restrict__ output_visibilities,
scalar_t* __restrict__ cameras,
scalar_t* __restrict__ invKRs,
scalar_t* __restrict__ camlocs,
scalar_t radius,
scalar_t depth_scale,
int B,
int K,
int inH, int inW,
int outH, int outW)
{
scalar_t proj[2];
scalar_t wloc[3];
// twice the stddev: 95% of the mass
int iradius = int(ceil(2*radius));
scalar_t expdiv = radius>0?2*radius*radius:1.0;
for (int b = blockIdx.x * blockDim.x + threadIdx.x; b < B; b += blockDim.x * gridDim.x) {
scalar_t* camloc = camlocs + b * 3;
scalar_t* invKR = invKRs + b * 9;
for (int h = blockIdx.y * blockDim.y + threadIdx.y; h < inH; h += blockDim.y * gridDim.y) {
for (int w = blockIdx.z * blockDim.z + threadIdx.z; w < inW; w += blockDim.z * gridDim.z) {
// cast this point into space
scalar_t depth = input[b * inH * inW + h * inW + w];
if(depth > 0) {
TOME_unproject_point(camloc, invKR, w, h, depth, wloc);
for (int k = 0; k < K; k++) {
scalar_t* camera = cameras + b * K * 12 + k * 12;
TOME_project_pointf(camera, wloc, proj, outW, outH);
scalar_t depth_k = TOME_get_point_depth(camera, wloc);
scalar_t visiblemass_sum = 0;
scalar_t mass_sum = 0;
int px = int(floor(proj[0]+0.5f));
int py = int(floor(proj[1]+0.5f));
for(int xk = max(0, px - iradius); xk <= min(px + iradius, outW-1); xk++) {
for(int yk = max(0, py - iradius); yk <= min(py + iradius, outH-1); yk++) {
scalar_t dist_k = (xk-proj[0])*(xk-proj[0]) + (yk-proj[1])*(yk-proj[1]);
// mass: what fraction of the blob in this pixel
scalar_t mass_k = exp(-dist_k / expdiv);
scalar_t zbuffer_k = output_depth[b * K * outH * outW + k * outH * outW + yk * outW + xk];
// weight: softmaxing depth in this pixel
scalar_t visibility_k = exp((zbuffer_k - depth_k) / depth_scale);
visibility_k = min(visibility_k, 1.0);
visiblemass_sum += mass_k * visibility_k;
mass_sum += mass_k;
}
}
if(mass_sum > 0) {
output_visibilities[
b * K * inH * inW + k * inH * inW + h * inW + w
] = visiblemass_sum / mass_sum;
}
}
}
}
}
}
}
std::vector<at::Tensor> depth_reprojection_splat_cuda(
at::Tensor input_depth,
at::Tensor cameras,
at::Tensor invKR,
at::Tensor camloc,
float radius,
float zbuffer_scale,
float visibility_scale,
int outH,
int outW) {
auto blkdim = 16;
const auto B = cameras.size(0);
const auto K = cameras.size(1);
const auto inH = input_depth.size(1);
const auto inW = input_depth.size(2);
const dim3 block = dim3(1, blkdim, blkdim);
const dim3 grid = dim3(1, 8, 8);
auto output_depth = at::zeros({B, K, outH, outW}, input_depth.type());
auto output_weights = at::zeros({B, K, outH, outW}, input_depth.type());
auto output_visibilities = at::zeros({B, K, inH, inW}, input_depth.type());
if(input_depth.type().scalarType() == at::ScalarType::Float) {
depth_reprojection_splat_cuda_kernel<float><<<grid, block>>>(
input_depth.data<float>(),
output_depth.data<float>(),
output_weights.data<float>(),
cameras.data<float>(),
invKR.data<float>(),
camloc.data<float>(),
radius, zbuffer_scale,
B, K, inH, inW, outH, outW);
output_depth.div_(output_weights);
depth_reprojection_splat_visibilities_cuda_kernel<float><<<grid, block>>>(
input_depth.data<float>(),
output_depth.data<float>(),
output_visibilities.data<float>(),
cameras.data<float>(),
invKR.data<float>(),
camloc.data<float>(),
radius, visibility_scale,
B, K, inH, inW, outH, outW);
}
else{
AT_ERROR("depth_reprojection_splat_cuda not implemented for '", input_depth.type().toString(), "'");
}
return {output_depth, output_weights, output_visibilities};
}
at::Tensor permutohedral_filter_cuda(
at::Tensor input,
at::Tensor positions,
at::Tensor weights,
bool reverse
) {
auto blkdim = 16;
const auto H = input.size(0);
const auto W = input.size(1);
const auto num_pixels = H*W;
const dim3 block = dim3(1, blkdim, blkdim);
const dim3 grid = dim3(1, 8, 8);
const auto pd = positions.size(2);
const auto id = input.size(2);
auto output = at::zeros({H, W, id}, input.type());
auto allocator = DeviceMemoryAllocator();
if(input.type().scalarType() == at::ScalarType::Float) {
if(pd == 5 && id == 3) {
auto lattice = PermutohedralLatticeGPU<float, 5, 4>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 2 && id == 3) {
auto lattice = PermutohedralLatticeGPU<float, 2, 4>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 2 && id == 2) {
auto lattice = PermutohedralLatticeGPU<float, 2, 3>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 2 && id == 1) {
auto lattice = PermutohedralLatticeGPU<float, 2, 2>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 3 && id == 1) {
auto lattice = PermutohedralLatticeGPU<float, 3, 2>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 3 && id == 2) {
auto lattice = PermutohedralLatticeGPU<float, 3, 3>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 3 && id == 3) {
auto lattice = PermutohedralLatticeGPU<float, 3, 4>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 6 && id == 2) {
auto lattice = PermutohedralLatticeGPU<float, 6, 3>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 6 && id == 3) {
auto lattice = PermutohedralLatticeGPU<float, 6, 4>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 6 && id == 4) {
auto lattice = PermutohedralLatticeGPU<float, 6, 5>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 6 && id == 5) {
auto lattice = PermutohedralLatticeGPU<float, 6, 6>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 6 && id == 6) {
auto lattice = PermutohedralLatticeGPU<float, 6, 7>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 6 && id == 7) {
auto lattice = PermutohedralLatticeGPU<float, 6, 8>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else if(pd == 6 && id == 8) {
auto lattice = PermutohedralLatticeGPU<float, 6, 9>(num_pixels, &allocator);
lattice.filter(
output.data<float>(),
input.data<float>(),
positions.data<float>(),
weights.data<float>(),
reverse
);
}
else{
AT_ASSERTM(false, "permutohedral filter: this (pd,id) is not present in the compiled binary");
}
}
else{
AT_ERROR("permutohedral_filter_cuda not implemented for '", input.type().toString(), "'");
}
return output;
} |
664dd02eaff60b19b1167d4915906fb52621d6d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define WIDTH 300
#define HEIGHT 300
#define WIDTH_PER_BLOCK 300
#define HEIGHT_PER_BLOCK 1
#define WIDTH_PER_THREADS 1
#define HEIGHT_PER_THREADS 1
#define NUM 16
#define MAX_DEPTH 10
#define SAMPLE 4
#define WARP_SIZE 32
#include <glut/gl3w.h>
#include <Windows.h>
#include <stdio.h>
#include "surface_functions.h"
#include <vector_functions.hpp>
#include <cuda_gl_interop.h>
#include <hip/device_functions.h>
#include "core/PathTracer.cuh"
#include "Camera/Camera.cuh"
#include <glut/glfw3.h>
#include "Shader/myShader.h"
#include "core/PathTracer.cuh"
#include "Ray/Ray.cuh"
#include "PostProcess/PostProcess.cuh"
#include <iostream>
#include <ctime>
#define STB_IMAGE_IMPLEMENTATION
#include "tool/stb_image.h"
hiprandState_t *state;
float *data_tmp;//store the rgb value for scene
int *cnt;//Store the times of rendering
GLuint tex;
GLuint prog;
cudaGraphicsResource *cudaTex;
hipGraphExec_t exe;
hipSurfaceObject_t texture_surface, surface_raw, surface_hdr;
hipStream_t stream;
__constant__ Camera globalCam;
//Initialize the state of rand generator
__global__ void initial(hiprandState_t *state, int *time)
{
int idx = (gridDim.x * blockIdx.y + blockIdx.x) * blockDim.x * blockDim.y + (threadIdx.x * blockDim.y + threadIdx.y);
hiprand_init(idx + 332 ,idx, 0, state + idx);
}
__global__ void debug(hipSurfaceObject_t surface, hipSurfaceObject_t surfacew, Scene *scene, hiprandState_t *state, int *cnt)
{
if (blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 0 && threadIdx.y == 0)
++(*cnt);
__syncthreads();
int stx = blockIdx.x * WIDTH_PER_BLOCK + threadIdx.x * WIDTH_PER_THREADS;
int sty = blockIdx.y * HEIGHT_PER_BLOCK + threadIdx.y * HEIGHT_PER_THREADS;
int idx = (gridDim.x * blockIdx.y + blockIdx.x) * blockDim.x * blockDim.y + (threadIdx.x * blockDim.y + threadIdx.y);
hiprandState_t *rstate = state + idx;
//hiprand_init(1234, idx, 0, rstate);// re-initializing here every frame produced a lot of noise
IntersectRecord rec;
float3 px_color = BLACK;
Ray r;
float frac1 = 1.0f / *cnt;
float frac2 = frac1 * (*cnt - 1);
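// Running average: with frac1 = 1/cnt and frac2 = (cnt-1)/cnt, the stored pixel becomes
// avg_new = ((cnt-1)*avg_old + sample)/cnt, i.e. progressive refinement over frames.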
float xoff = hiprand_uniform(rstate), yoff = hiprand_uniform(rstate);// use this thread's RNG state for the sub-pixel jitter
float4 cas;
for(int x = stx; x < stx + WIDTH_PER_THREADS; x++)
for (int y = sty; y < sty + HEIGHT_PER_THREADS; y++)
{
surf2Dread(&cas, surface, x * sizeof(float4), y);
r = globalCam.generateRay(x - WIDTH / 2 + xoff, y - HEIGHT / 2 + yoff);
px_color = pathTracer(r, *scene, state + idx);
px_color = frac2 * make_float3(cas.x, cas.y, cas.z) + frac1 * px_color;
// surf2Dwrite(make_float4(px_color.x, px_color.y, px_color.z, 1.0f), surfacew, x * sizeof(float4), y);
surf2Dwrite(make_float4(px_color.x, px_color.y, px_color.z, 1.0f), surface, x * sizeof(float4), y);
}
}
void display();
__device__ void computeTexture();
bool renderScene(bool);
GLuint initGL();
GLFWwindow* glEnvironmentSetup();
bool initCUDA(GLuint glTex);
void test_for_initialize_scene();
Scene *sce;
EnvironmentLight *e;
int main(int argc, char **argv)
{
GLFWwindow *window = glEnvironmentSetup();
bool changed = true, sta = true;
tex = initGL();
test_for_initialize_scene();
initCUDA(tex);
auto error = hipDeviceSynchronize();
while (!glfwWindowShouldClose(window) && sta && changed)
{
sta = renderScene(changed);
//changed = false;
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
hipFree(state);
hipFree(data_tmp);
return 0;
}
bool renderScene(bool changed)
{
hipError_t error;
//The information for kernel
dim3 blockSize(WIDTH / WIDTH_PER_BLOCK, HEIGHT / HEIGHT_PER_BLOCK);
dim3 threadSize(WIDTH_PER_BLOCK / WIDTH_PER_THREADS, HEIGHT_PER_BLOCK / HEIGHT_PER_THREADS);
hipGraphLaunch(exe, stream);
error = hipStreamSynchronize(stream);
display();
int idx = 0;
return true;
}
GLuint initGL()
{
//The position of the quad which covers the full screen
static float vertices[6][2] = {
-1.0f, 1.0f,
-1.0f, -1.0f,
1.0f, 1.0f,
1.0f, 1.0f,
-1.0f, -1.0f,
1.0f, -1.0f
};
GLuint tex;
//initialize the empty texture
//and set the parameter for it
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, WIDTH, HEIGHT, 0, GL_RGBA, GL_FLOAT, nullptr);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
GLuint buffer;
GLuint vao;
//Push the vertex data into the vertex array
glCreateBuffers(1, &buffer);
glCreateVertexArrays(1, &vao);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glVertexAttribPointer(1, 2, GL_FLOAT, NULL, NULL, nullptr);
glEnableVertexAttribArray(1);
//Initialize the OpenGL shaders and program
prog = glCreateProgram();
Shader vertex, frag;
vertex.LoadFile("./Shader/texture.vert");
frag.LoadFile("./Shader/texture.frag");
vertex.Load(GL_VERTEX_SHADER, prog);
frag.Load(GL_FRAGMENT_SHADER, prog);
glLinkProgram(prog);
glBindTexture(GL_TEXTURE_2D, 0);
return tex;
}
GLFWwindow* glEnvironmentSetup()
{
glfwInit();
GLFWwindow *window = glfwCreateWindow(WIDTH, HEIGHT, "test", NULL, NULL);
glfwMakeContextCurrent(window);
gl3wInit();
return window;
}
bool initCUDA(GLuint glTex)
{
hipGraph_t renderProcess;
//The information for kernel
dim3 blockSize(WIDTH / WIDTH_PER_BLOCK, HEIGHT / HEIGHT_PER_BLOCK);
dim3 threadSize(WIDTH_PER_BLOCK / WIDTH_PER_THREADS, HEIGHT_PER_BLOCK / HEIGHT_PER_THREADS);
int thread_num = blockSize.x * blockSize.y * threadSize.x * threadSize.y;
//Create the surface bound to OpenGL texture
size_t heap_sz;
auto error = hipDeviceGetLimit(&heap_sz, hipLimitMallocHeapSize);
error = hipGraphicsGLRegisterImage(&cudaTex, tex, GL_TEXTURE_2D, hipGraphicsRegisterFlagsSurfaceLoadStore);
error = hipGraphicsMapResources(1, &cudaTex, 0);
hipArray_t texArray;
error = hipGraphicsSubResourceGetMappedArray(&texArray, cudaTex, 0, 0);
hipResourceDesc dsc;
dsc.resType = hipResourceTypeArray;
dsc.res.array.array = texArray;
error = hipCreateSurfaceObject(&texture_surface, &dsc);
//Create the surface that stores the raw accumulated render output
hipArray *arr;
hipChannelFormatDesc channel = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindFloat);
error = hipMallocArray(&arr, &channel, WIDTH, HEIGHT, hipArraySurfaceLoadStore);
dsc.res.array.array = arr;
error = hipCreateSurfaceObject(&surface_raw, &dsc);
//Create the surface that stores the HDR-processed intermediate
channel = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindFloat);
error = hipMallocArray(&arr, &channel, WIDTH, HEIGHT, hipArraySurfaceLoadStore);
dsc.res.array.array = arr;
error = hipCreateSurfaceObject(&surface_hdr, &dsc);
//Initialize the camera
Camera cam(make_float3(0.0f, 0.0f, 0.0f), make_float3(0.0f, 0.0f, -1.0f), 2.0f, 0.10f, 1000.0f,
make_int2(WIDTH / 2, HEIGHT / 2), make_float3(0.0f, 1.0f, 0.0f));
//Malloc rand generator
hipMalloc(&state, sizeof(hiprandState_t) * thread_num * 2);
int t = time(NULL);
int *p;
hipMalloc(&p, sizeof(int));
hipMemcpy(p, &t, sizeof(int), hipMemcpyHostToDevice);
initial << <blockSize, threadSize >> > (state, p);
error = hipDeviceSynchronize();
//Initialize camera
error = hipMemcpyToSymbol(globalCam, &cam, sizeof(Camera));
//Initialize the count
error = hipMalloc(&cnt, sizeof(int));
error = hipMemset(cnt, 0, sizeof(int));
//Capture the stream to Create Graph
hipStreamCreate(&stream);
//Capture the procedure for rendering to create cudaGraph
//The procedure is render -> HDR -> filter
hipStreamBeginCapture(stream);
hipLaunchKernelGGL(( debug) , dim3(blockSize), dim3(threadSize), 0, stream, surface_raw, texture_surface, sce, state, cnt);
hipLaunchKernelGGL(( HDRKernel) , dim3(blockSize), dim3(threadSize), 0, stream, surface_hdr, surface_raw, WIDTH_PER_THREADS, HEIGHT_PER_THREADS, WIDTH_PER_BLOCK, HEIGHT_PER_BLOCK);
hipLaunchKernelGGL(( filterKernel) , dim3(blockSize), dim3(threadSize), 0, stream, texture_surface, surface_hdr, WIDTH_PER_THREADS, HEIGHT_PER_THREADS, WIDTH_PER_BLOCK, HEIGHT_PER_BLOCK, WIDTH, HEIGHT);
hipStreamEndCapture(stream, &renderProcess);
hipGraphInstantiate(&exe, renderProcess, nullptr, nullptr, 0);
return error == hipSuccess;
}
void display()
{
glUseProgram(prog);
glBindTexture(GL_TEXTURE_2D, tex);
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_TRIANGLES, 0, 6);
}
void test_for_initialize_scene()
{
int width, height, chanel;
float * tmp = stbi_loadf("environment.png", &width, &height, &chanel, 4);
EnvironmentLight light(tmp, width, height);
STBI_FREE(tmp);
Scene scene;
int lz[light::TYPE_NUM] = { 0,0,1,1 }, ms[model::TYPE_NUM] = { 0,0,2 };
int mat_type[] = { material::LAMBERTIAN , material::LAMBERTIAN, material::LAMBERTIAN };
Lambertian lamb(make_float3(0.7f, 0.0f, 0.0f)), lamb2(make_float3(0.9f, 0.0f, 0.0f)), lamb3(make_float3(1.0f, 1.0f, 1.0f));
Material m(&lamb, material::LAMBERTIAN), c(&lamb2, material::LAMBERTIAN), cs(&lamb3, material::LAMBERTIAN);
Material t[] = { m,c ,cs };
TriangleLight trl(make_float3(0.0f, 10.3f, -2.0f),
make_float3(2.0f, 0.7f, -3.0f),
make_float3(0.0f, 0.0f, -3.0f), make_float3(31.0f, 31.0f, 31.0f), true);
Quadratic q(make_float3(0.33f, 0.0f, 0.0f), Sphere);
q.setUpTransformation(
mat4(1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, -8.0f,
0.0f, 0.0f, 0.0f, 1.0f)
);
Quadratic s(make_float3(0.01f, 0.0f, 0.0f), Sphere);
s.setUpTransformation(
mat4(1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, -103.0f,
0.0f, 0.0f, 1.0f, -8.0f,
0.0f, 0.0f, 0.0f, 1.0f)
);
PointLight pl(make_float3(-8.0f, 0.0f, 0.0f), make_float3(233.7f, 33.8f, 77.7f));
Quadratic m_a[] = { q,s };
DirectionalLight disl(make_float3(0.0f, -1.0f, 0.0f), make_float3(5.0f, 5.0f, 5.0f));
scene.initializeScene(
lz, ms, &pl, &disl, &trl, &light, nullptr, nullptr,
m_a, mat_type, t
);
hipMalloc(&sce, sizeof(Scene));
auto error = hipMemcpy(sce, &scene, sizeof(Scene), hipMemcpyHostToDevice);
}
| 664dd02eaff60b19b1167d4915906fb52621d6d4.cu | #define WIDTH 300
#define HEIGHT 300
#define WIDTH_PER_BLOCK 300
#define HEIGHT_PER_BLOCK 1
#define WIDTH_PER_THREADS 1
#define HEIGHT_PER_THREADS 1
#define NUM 16
#define MAX_DEPTH 10
#define SAMPLE 4
#define WARP_SIZE 32
#include <glut/gl3w.h>
#include <Windows.h>
#include <stdio.h>
#include "surface_functions.h"
#include <vector_functions.hpp>
#include <cuda_gl_interop.h>
#include <device_functions.h>
#include "core/PathTracer.cuh"
#include "Camera/Camera.cuh"
#include <glut/glfw3.h>
#include "Shader/myShader.h"
#include "core/PathTracer.cuh"
#include "Ray/Ray.cuh"
#include "PostProcess/PostProcess.cuh"
#include <iostream>
#include <ctime>
#define STB_IMAGE_IMPLEMENTATION
#include "tool/stb_image.h"
curandState *state;
float *data_tmp;//store the rgb value for scene
int *cnt;//Store the times of rendering
GLuint tex;
GLuint prog;
cudaGraphicsResource *cudaTex;
cudaGraphExec_t exe;
cudaSurfaceObject_t texture_surface, surface_raw, surface_hdr;
cudaStream_t stream;
__constant__ Camera globalCam;
//Initialize the state of rand generator
__global__ void initial(curandState *state, int *time)
{
int idx = (gridDim.x * blockIdx.y + blockIdx.x) * blockDim.x * blockDim.y + (threadIdx.x * blockDim.y + threadIdx.y);
curand_init(idx + 332 ,idx, 0, state + idx);
}
__global__ void debug(cudaSurfaceObject_t surface, cudaSurfaceObject_t surfacew, Scene *scene, curandState *state, int *cnt)
{
if (blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 0 && threadIdx.y == 0)
++(*cnt);
__syncthreads();
int stx = blockIdx.x * WIDTH_PER_BLOCK + threadIdx.x * WIDTH_PER_THREADS;
int sty = blockIdx.y * HEIGHT_PER_BLOCK + threadIdx.y * HEIGHT_PER_THREADS;
int idx = (gridDim.x * blockIdx.y + blockIdx.x) * blockDim.x * blockDim.y + (threadIdx.x * blockDim.y + threadIdx.y);
curandState *rstate = state + idx;
//curand_init(1234, idx, 0, rstate);// re-initializing here every frame produced a lot of noise
IntersectRecord rec;
float3 px_color = BLACK;
Ray r;
float frac1 = 1.0f / *cnt;
float frac2 = frac1 * (*cnt - 1);
float xoff = curand_uniform(rstate), yoff = curand_uniform(rstate);// use this thread's RNG state for the sub-pixel jitter
float4 cas;
for(int x = stx; x < stx + WIDTH_PER_THREADS; x++)
for (int y = sty; y < sty + HEIGHT_PER_THREADS; y++)
{
surf2Dread(&cas, surface, x * sizeof(float4), y);
r = globalCam.generateRay(x - WIDTH / 2 + xoff, y - HEIGHT / 2 + yoff);
px_color = pathTracer(r, *scene, state + idx);
px_color = frac2 * make_float3(cas.x, cas.y, cas.z) + frac1 * px_color;
// surf2Dwrite(make_float4(px_color.x, px_color.y, px_color.z, 1.0f), surfacew, x * sizeof(float4), y);
surf2Dwrite(make_float4(px_color.x, px_color.y, px_color.z, 1.0f), surface, x * sizeof(float4), y);
}
}
void display();
__device__ void computeTexture();
bool renderScene(bool);
GLuint initGL();
GLFWwindow* glEnvironmentSetup();
bool initCUDA(GLuint glTex);
void test_for_initialize_scene();
Scene *sce;
EnvironmentLight *e;
int main(int argc, char **argv)
{
GLFWwindow *window = glEnvironmentSetup();
bool changed = true, sta = true;
tex = initGL();
test_for_initialize_scene();
initCUDA(tex);
auto error = cudaDeviceSynchronize();
while (!glfwWindowShouldClose(window) && sta && changed)
{
sta = renderScene(changed);
//changed = false;
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
cudaFree(state);
cudaFree(data_tmp);
return 0;
}
bool renderScene(bool changed)
{
cudaError_t error;
//The information for kernel
dim3 blockSize(WIDTH / WIDTH_PER_BLOCK, HEIGHT / HEIGHT_PER_BLOCK);
dim3 threadSize(WIDTH_PER_BLOCK / WIDTH_PER_THREADS, HEIGHT_PER_BLOCK / HEIGHT_PER_THREADS);
cudaGraphLaunch(exe, stream);
error = cudaStreamSynchronize(stream);
display();
int idx = 0;
return true;
}
GLuint initGL()
{
//The position of the quad which covers the full screen
static float vertices[6][2] = {
-1.0f, 1.0f,
-1.0f, -1.0f,
1.0f, 1.0f,
1.0f, 1.0f,
-1.0f, -1.0f,
1.0f, -1.0f
};
GLuint tex;
//initialize the empty texture
//and set the parameter for it
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, WIDTH, HEIGHT, 0, GL_RGBA, GL_FLOAT, nullptr);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
GLuint buffer;
GLuint vao;
//Push the vertex data into the vertex array
glCreateBuffers(1, &buffer);
glCreateVertexArrays(1, &vao);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glVertexAttribPointer(1, 2, GL_FLOAT, NULL, NULL, nullptr);
glEnableVertexAttribArray(1);
//Initialize the OpenGL shaders and program
prog = glCreateProgram();
Shader vertex, frag;
vertex.LoadFile("./Shader/texture.vert");
frag.LoadFile("./Shader/texture.frag");
vertex.Load(GL_VERTEX_SHADER, prog);
frag.Load(GL_FRAGMENT_SHADER, prog);
glLinkProgram(prog);
glBindTexture(GL_TEXTURE_2D, 0);
return tex;
}
GLFWwindow* glEnvironmentSetup()
{
glfwInit();
GLFWwindow *window = glfwCreateWindow(WIDTH, HEIGHT, "test", NULL, NULL);
glfwMakeContextCurrent(window);
gl3wInit();
return window;
}
bool initCUDA(GLuint glTex)
{
cudaGraph_t renderProcess;
//The information for kernel
dim3 blockSize(WIDTH / WIDTH_PER_BLOCK, HEIGHT / HEIGHT_PER_BLOCK);
dim3 threadSize(WIDTH_PER_BLOCK / WIDTH_PER_THREADS, HEIGHT_PER_BLOCK / HEIGHT_PER_THREADS);
int thread_num = blockSize.x * blockSize.y * threadSize.x * threadSize.y;
//Create the surface bound to OpenGL texture
size_t heap_sz;
auto error = cudaDeviceGetLimit(&heap_sz, cudaLimitMallocHeapSize);
error = cudaGraphicsGLRegisterImage(&cudaTex, tex, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsSurfaceLoadStore);
error = cudaGraphicsMapResources(1, &cudaTex, 0);
cudaArray_t texArray;
error = cudaGraphicsSubResourceGetMappedArray(&texArray, cudaTex, 0, 0);
cudaResourceDesc dsc;
dsc.resType = cudaResourceTypeArray;
dsc.res.array.array = texArray;
error = cudaCreateSurfaceObject(&texture_surface, &dsc);
//Create the surface that stores the raw accumulated render output
cudaArray *arr;
cudaChannelFormatDesc channel = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindFloat);
error = cudaMallocArray(&arr, &channel, WIDTH, HEIGHT, cudaArraySurfaceLoadStore);
dsc.res.array.array = arr;
error = cudaCreateSurfaceObject(&surface_raw, &dsc);
//Create the surface that stores the HDR-processed intermediate
channel = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindFloat);
error = cudaMallocArray(&arr, &channel, WIDTH, HEIGHT, cudaArraySurfaceLoadStore);
dsc.res.array.array = arr;
error = cudaCreateSurfaceObject(&surface_hdr, &dsc);
//Initialize the camera
Camera cam(make_float3(0.0f, 0.0f, 0.0f), make_float3(0.0f, 0.0f, -1.0f), 2.0f, 0.10f, 1000.0f,
make_int2(WIDTH / 2, HEIGHT / 2), make_float3(0.0f, 1.0f, 0.0f));
//Malloc rand generator
cudaMalloc(&state, sizeof(curandState) * thread_num * 2);
int t = time(NULL);
int *p;
cudaMalloc(&p, sizeof(int));
cudaMemcpy(p, &t, sizeof(int), cudaMemcpyHostToDevice);
initial << <blockSize, threadSize >> > (state, p);
error = cudaDeviceSynchronize();
//Initialize camera
error = cudaMemcpyToSymbol(globalCam, &cam, sizeof(Camera));
//Initialize the count
error = cudaMalloc(&cnt, sizeof(int));
error = cudaMemset(cnt, 0, sizeof(int));
//Capture the stream to Create Graph
cudaStreamCreate(&stream);
//Capture the procedure for rendering to create cudaGraph
//The procedure is render -> HDR -> filter
cudaStreamBeginCapture(stream);
debug <<<blockSize, threadSize, 0, stream>>> (surface_raw, texture_surface, sce, state, cnt);
HDRKernel <<<blockSize, threadSize, 0, stream>>> (surface_hdr, surface_raw, WIDTH_PER_THREADS, HEIGHT_PER_THREADS, WIDTH_PER_BLOCK, HEIGHT_PER_BLOCK);
filterKernel <<<blockSize, threadSize, 0, stream>>> (texture_surface, surface_hdr, WIDTH_PER_THREADS, HEIGHT_PER_THREADS, WIDTH_PER_BLOCK, HEIGHT_PER_BLOCK, WIDTH, HEIGHT);
cudaStreamEndCapture(stream, &renderProcess);
cudaGraphInstantiate(&exe, renderProcess, nullptr, nullptr, 0);
return error == cudaSuccess;
}
void display()
{
glUseProgram(prog);
glBindTexture(GL_TEXTURE_2D, tex);
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_TRIANGLES, 0, 6);
}
void test_for_initialize_scene()
{
int width, height, chanel;
float * tmp = stbi_loadf("environment.png", &width, &height, &chanel, 4);
EnvironmentLight light(tmp, width, height);
STBI_FREE(tmp);
Scene scene;
int lz[light::TYPE_NUM] = { 0,0,1,1 }, ms[model::TYPE_NUM] = { 0,0,2 };
int mat_type[] = { material::LAMBERTIAN , material::LAMBERTIAN, material::LAMBERTIAN };
Lambertian lamb(make_float3(0.7f, 0.0f, 0.0f)), lamb2(make_float3(0.9f, 0.0f, 0.0f)), lamb3(make_float3(1.0f, 1.0f, 1.0f));
Material m(&lamb, material::LAMBERTIAN), c(&lamb2, material::LAMBERTIAN), cs(&lamb3, material::LAMBERTIAN);
Material t[] = { m,c ,cs };
TriangleLight trl(make_float3(0.0f, 10.3f, -2.0f),
make_float3(2.0f, 0.7f, -3.0f),
make_float3(0.0f, 0.0f, -3.0f), make_float3(31.0f, 31.0f, 31.0f), true);
Quadratic q(make_float3(0.33f, 0.0f, 0.0f), Sphere);
q.setUpTransformation(
mat4(1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, -8.0f,
0.0f, 0.0f, 0.0f, 1.0f)
);
Quadratic s(make_float3(0.01f, 0.0f, 0.0f), Sphere);
s.setUpTransformation(
mat4(1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, -103.0f,
0.0f, 0.0f, 1.0f, -8.0f,
0.0f, 0.0f, 0.0f, 1.0f)
);
PointLight pl(make_float3(-8.0f, 0.0f, 0.0f), make_float3(233.7f, 33.8f, 77.7f));
Quadratic m_a[] = { q,s };
DirectionalLight disl(make_float3(0.0f, -1.0f, 0.0f), make_float3(5.0f, 5.0f, 5.0f));
scene.initializeScene(
lz, ms, &pl, &disl, &trl, &light, nullptr, nullptr,
m_a, mat_type, t
);
cudaMalloc(&sce, sizeof(Scene));
auto error = cudaMemcpy(sce, &scene, sizeof(Scene), cudaMemcpyHostToDevice);
}
|
14999ecdfd6442dd75baeb7ed724372e2830e9fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma, created on 27.11.2018
//
#include <loops/special_kernels.h>
#include <ops/declarable/helpers/flatten.h>
namespace sd {
////////////////////////////////////////////////////////////////////////
template <typename T>
SD_KERNEL void flattenKernel(sd::Pointer *extraPointers, int dOffset, char order, void *vz, sd::LongType *zShapeInfo,
void *vy, sd::LongType *yShapeInfo) {
auto z = reinterpret_cast<T *>(vz);
auto y = reinterpret_cast<T *>(vy);
__shared__ sd::LongType lenY, yOrder, zEWS, yEWS;
if (threadIdx.x == 0) {
yEWS = shape::elementWiseStride(yShapeInfo);
zEWS = shape::elementWiseStride(zShapeInfo);
lenY = shape::length(yShapeInfo);
}
__syncthreads();
sd::LongType tid = blockIdx.x * blockDim.x + threadIdx.x;
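  // Grid-stride loop: element i of y, read in the requested ordering, is written to
  // slot i of this array's slice of the flattened output z, shifted by dOffset.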
for (auto i = tid; i < lenY; i += gridDim.x * blockDim.x)
z[i * zEWS + dOffset] = y[ops::helpers::getIndexOffsetOrdered(i, yShapeInfo, order)];
}
////////////////////////////////////////////////////////////////////////
template <typename T>
SD_HOST void flattenKernelGeneric(dim3 &launchDims, hipStream_t *stream, sd::Pointer *extraPointers, int dOffset,
char order, void *vz, sd::LongType *zShapeInfo, void *vy, sd::LongType *yShapeInfo) {
hipLaunchKernelGGL(( flattenKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, extraPointers, dOffset, order, vz, zShapeInfo,
vy, yShapeInfo);
sd::DebugHelper::checkErrorCode(stream, "flattenGeneric(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void flattenKernelGeneric,
(dim3 & launchDims, hipStream_t *stream, sd::Pointer *extraPointers, int dOffset, char order,
void *vz, sd::LongType *zShapeInfo, void *vy, sd::LongType *yShapeInfo),
SD_COMMON_TYPES);
} // namespace sd
| 14999ecdfd6442dd75baeb7ed724372e2830e9fc.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma, created on 27.11.2018
//
#include <loops/special_kernels.h>
#include <ops/declarable/helpers/flatten.h>
namespace sd {
////////////////////////////////////////////////////////////////////////
template <typename T>
SD_KERNEL void flattenKernel(sd::Pointer *extraPointers, int dOffset, char order, void *vz, sd::LongType *zShapeInfo,
void *vy, sd::LongType *yShapeInfo) {
auto z = reinterpret_cast<T *>(vz);
auto y = reinterpret_cast<T *>(vy);
__shared__ sd::LongType lenY, yOrder, zEWS, yEWS;
if (threadIdx.x == 0) {
yEWS = shape::elementWiseStride(yShapeInfo);
zEWS = shape::elementWiseStride(zShapeInfo);
lenY = shape::length(yShapeInfo);
}
__syncthreads();
sd::LongType tid = blockIdx.x * blockDim.x + threadIdx.x;
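  // Grid-stride loop: element i of y, read in the requested ordering, is written to
  // slot i of this array's slice of the flattened output z, shifted by dOffset.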
for (auto i = tid; i < lenY; i += gridDim.x * blockDim.x)
z[i * zEWS + dOffset] = y[ops::helpers::getIndexOffsetOrdered(i, yShapeInfo, order)];
}
////////////////////////////////////////////////////////////////////////
template <typename T>
SD_HOST void flattenKernelGeneric(dim3 &launchDims, cudaStream_t *stream, sd::Pointer *extraPointers, int dOffset,
char order, void *vz, sd::LongType *zShapeInfo, void *vy, sd::LongType *yShapeInfo) {
flattenKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(extraPointers, dOffset, order, vz, zShapeInfo,
vy, yShapeInfo);
sd::DebugHelper::checkErrorCode(stream, "flattenGeneric(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void flattenKernelGeneric,
(dim3 & launchDims, cudaStream_t *stream, sd::Pointer *extraPointers, int dOffset, char order,
void *vz, sd::LongType *zShapeInfo, void *vy, sd::LongType *yShapeInfo),
SD_COMMON_TYPES);
} // namespace sd
|
c8b6dbcc9806d20202f88c6fc603df595d134dec.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include "helper_cuda.h"
#include "helper_timer.h"
#include <iostream>
#include "bucketsort.cuh"
#include "mergesort.cuh"
using namespace std;
////////////////////////////////////////////////////////////////////////////////
// Size of the testset (Bitwise shift of 1 over 22 places)
////////////////////////////////////////////////////////////////////////////////
#define SIZE (1 << 22)
////////////////////////////////////////////////////////////////////////////////
// Number of tests to average over
////////////////////////////////////////////////////////////////////////////////
#define TEST 4
////////////////////////////////////////////////////////////////////////////////
// The timers for the different parts of the algo
////////////////////////////////////////////////////////////////////////////////
StopWatchInterface *uploadTimer, *downloadTimer, *bucketTimer,
*mergeTimer, *totalTimer, *cpuTimer;
////////////////////////////////////////////////////////////////////////////////
// Compare method for CPU sort
////////////////////////////////////////////////////////////////////////////////
inline int compare(const void *a, const void *b) {
if(*((float *)a) < *((float *)b)) return -1;
else if(*((float *)a) > *((float *)b)) return 1;
else return 0;
}
////////////////////////////////////////////////////////////////////////////////
// Forward declaration
////////////////////////////////////////////////////////////////////////////////
void cudaSort(float *origList, float minimum, float maximum,
float *resultList, int numElements);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
// Create timers for each sort
sdkCreateTimer(&uploadTimer);
sdkCreateTimer(&downloadTimer);
sdkCreateTimer(&bucketTimer);
sdkCreateTimer(&mergeTimer);
sdkCreateTimer(&totalTimer);
sdkCreateTimer(&cpuTimer);
int numElements = 0;
// Number of elements in the test bed
if(strcmp(argv[1],"r") ==0) {
numElements = SIZE;
}
else {
FILE *fp;
fp = fopen(argv[1],"r");
if(fp == NULL) {
cout << "Error reading file" << endl;
exit(EXIT_FAILURE);
}
int count = 0;
float c;
while(fscanf(fp,"%f",&c) != EOF) {
count++;
}
fclose(fp);
numElements = count;
}
cout << "Sorting list of " << numElements << " floats\n";
//
int devID = 0;
if(argc == 3) {
devID = atoi(argv[2]);
}
printf("select device : %d\n", devID);
hipSetDevice(devID);
hipError_t error;
hipDeviceProp_t deviceProp;
error = hipGetDeviceProperties(&deviceProp, devID);
if (error != hipSuccess){
printf("hipGetDeviceProperties returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}else{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// Generate random data
// Memory space the list of random floats will take up
int mem_size = numElements * sizeof(float);
// Allocate enough for the input list
float *cpu_idata = (float *)malloc(mem_size);
// Allocate enough for the output list on the cpu side
float *cpu_odata = (float *)malloc(mem_size);
// Allocate enough memory for the output list on the gpu side
float *gpu_odata = (float *)malloc(mem_size);
float datamin = FLT_MAX;
float datamax = -FLT_MAX;
if(strcmp(argv[1],"r")==0) {
for (int i = 0; i < numElements; i++) {
// Generate random floats between 0 and 1 for the input data
cpu_idata[i] = ((float) rand() / RAND_MAX);
//Compare data at index to data minimum, if less than current minimum, set that element as new minimum
datamin = min(cpu_idata[i], datamin);
//Same as above but for maximum
datamax = max(cpu_idata[i], datamax);
}
} else {
FILE *fp;
fp = fopen(argv[1],"r");
for(int i = 0; i < numElements; i++) {
fscanf(fp,"%f",&cpu_idata[i]);
datamin = min(cpu_idata[i], datamin);
datamax = max(cpu_idata[i],datamax);
}
}
cout << "Sorting on GPU..." << flush;
// GPU Sort
for (int i = 0; i < TEST; i++)
cudaSort(cpu_idata, datamin, datamax, gpu_odata, numElements);
cout << "done.\n";
#ifdef VERIFY
cout << "Sorting on CPU..." << flush;
// CPU Sort
memcpy(cpu_odata, cpu_idata, mem_size);
sdkStartTimer(&cpuTimer);
qsort(cpu_odata, numElements, sizeof(float), compare);
sdkStopTimer(&cpuTimer);
cout << "done.\n";
cout << "Checking result..." << flush;
// Result checking
int count = 0;
for(int i = 0; i < numElements; i++)
if(cpu_odata[i] != gpu_odata[i])
{
printf("Sort missmatch on element %d: \n", i);
printf("CPU = %f : GPU = %f\n", cpu_odata[i], gpu_odata[i]);
count++;
break;
}
if(count == 0) cout << "PASSED.\n";
else cout << "FAILED.\n";
#endif
// Timer report
printf("GPU iterations: %d\n", TEST);
#ifdef TIMER
#ifdef VERIFY
printf("Average CPU execution time: %f ms\n", sdkGetTimerValue(&cpuTimer));
#endif
printf("Average GPU execution time: %f ms\n", sdkGetTimerValue(&totalTimer) / TEST);
printf(" - Upload : %f ms\n", sdkGetTimerValue(&uploadTimer) / TEST);
printf(" - Download : %f ms\n", sdkGetTimerValue(&downloadTimer) / TEST);
printf(" - Bucket sort : %f ms\n", sdkGetTimerValue(&bucketTimer) / TEST);
printf(" - Merge sort : %f ms\n", sdkGetTimerValue(&mergeTimer) / TEST);
#endif
#ifdef OUTPUT
FILE *tp;
const char filename2[]="./hybridoutput.txt";
tp = fopen(filename2,"w");
for(int i = 0; i < numElements; i++) {
fprintf(tp,"%f ",cpu_idata[i]);
}
fclose(tp);
#endif
// Release memory
sdkDeleteTimer(&uploadTimer);
sdkDeleteTimer(&downloadTimer);
sdkDeleteTimer(&bucketTimer);
sdkDeleteTimer(&mergeTimer);
sdkDeleteTimer(&totalTimer);
sdkDeleteTimer(&cpuTimer);
free(cpu_idata); free(cpu_odata); free(gpu_odata);
}
void cudaSort(float *origList, float minimum, float maximum,
float *resultList, int numElements)
{
// Initialization and upload data
float *d_input = NULL;
float *d_output = NULL;
int mem_size = (numElements + DIVISIONS * 4) * sizeof(float);
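	// The extra DIVISIONS * 4 floats leave room to pad each bucket with null elements
	// up to a float4 boundary for the float4-based merge stage (tracked in nullElements).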
sdkStartTimer(&uploadTimer);
{
hipMalloc((void**) &d_input, mem_size);
hipMalloc((void**) &d_output, mem_size);
hipMemcpy((void *) d_input, (void *)origList, numElements * sizeof(float),
hipMemcpyHostToDevice);
init_bucketsort(numElements);
}
sdkStopTimer(&uploadTimer);
sdkStartTimer(&totalTimer);
// Bucketsort the list
sdkStartTimer(&bucketTimer);
int *sizes = (int*) malloc(DIVISIONS * sizeof(int));
int *nullElements = (int*) malloc(DIVISIONS * sizeof(int));
unsigned int *origOffsets = (unsigned int *) malloc((DIVISIONS + 1) * sizeof(int));
bucketSort(d_input, d_output, numElements, sizes, nullElements,
minimum, maximum, origOffsets);
sdkStopTimer(&bucketTimer);
// Mergesort the result
sdkStartTimer(&mergeTimer);
float4 *d_origList = (float4*) d_output,
*d_resultList = (float4*) d_input;
int newlistsize = 0;
for(int i = 0; i < DIVISIONS; i++)
newlistsize += sizes[i] * 4;
float4 *mergeresult = runMergeSort( newlistsize, DIVISIONS, d_origList, d_resultList,
sizes, nullElements, origOffsets); //d_origList;
hipDeviceSynchronize();
sdkStopTimer(&mergeTimer);
sdkStopTimer(&totalTimer);
// Download result
sdkStartTimer(&downloadTimer);
checkCudaErrors( hipMemcpy((void *) resultList,
(void *)mergeresult, numElements * sizeof(float), hipMemcpyDeviceToHost) );
sdkStopTimer(&downloadTimer);
// Clean up
finish_bucketsort();
hipFree(d_input); hipFree(d_output);
free(nullElements); free(sizes);
}
| c8b6dbcc9806d20202f88c6fc603df595d134dec.cu | #ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include "helper_cuda.h"
#include "helper_timer.h"
#include <iostream>
#include "bucketsort.cuh"
#include "mergesort.cuh"
using namespace std;
////////////////////////////////////////////////////////////////////////////////
// Size of the testset (Bitwise shift of 1 over 22 places)
////////////////////////////////////////////////////////////////////////////////
#define SIZE (1 << 22)
////////////////////////////////////////////////////////////////////////////////
// Number of tests to average over
////////////////////////////////////////////////////////////////////////////////
#define TEST 4
////////////////////////////////////////////////////////////////////////////////
// The timers for the different parts of the algo
////////////////////////////////////////////////////////////////////////////////
StopWatchInterface *uploadTimer, *downloadTimer, *bucketTimer,
*mergeTimer, *totalTimer, *cpuTimer;
////////////////////////////////////////////////////////////////////////////////
// Compare method for CPU sort
////////////////////////////////////////////////////////////////////////////////
inline int compare(const void *a, const void *b) {
if(*((float *)a) < *((float *)b)) return -1;
else if(*((float *)a) > *((float *)b)) return 1;
else return 0;
}
////////////////////////////////////////////////////////////////////////////////
// Forward declaration
////////////////////////////////////////////////////////////////////////////////
void cudaSort(float *origList, float minimum, float maximum,
float *resultList, int numElements);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
// Create timers for each sort
sdkCreateTimer(&uploadTimer);
sdkCreateTimer(&downloadTimer);
sdkCreateTimer(&bucketTimer);
sdkCreateTimer(&mergeTimer);
sdkCreateTimer(&totalTimer);
sdkCreateTimer(&cpuTimer);
int numElements = 0;
// Number of elements in the test bed
if(strcmp(argv[1],"r") ==0) {
numElements = SIZE;
}
else {
FILE *fp;
fp = fopen(argv[1],"r");
if(fp == NULL) {
cout << "Error reading file" << endl;
exit(EXIT_FAILURE);
}
int count = 0;
float c;
while(fscanf(fp,"%f",&c) != EOF) {
count++;
}
fclose(fp);
numElements = count;
}
cout << "Sorting list of " << numElements << " floats\n";
//
int devID = 0;
if(argc == 3) {
devID = atoi(argv[2]);
}
printf("select device : %d\n", devID);
cudaSetDevice(devID);
cudaError_t error;
cudaDeviceProp deviceProp;
error = cudaGetDeviceProperties(&deviceProp, devID);
if (error != cudaSuccess){
printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}else{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// Generate random data
// Memory space the list of random floats will take up
int mem_size = numElements * sizeof(float);
// Allocate enough for the input list
float *cpu_idata = (float *)malloc(mem_size);
// Allocate enough for the output list on the cpu side
float *cpu_odata = (float *)malloc(mem_size);
// Allocate enough memory for the output list on the gpu side
float *gpu_odata = (float *)malloc(mem_size);
float datamin = FLT_MAX;
float datamax = -FLT_MAX;
if(strcmp(argv[1],"r")==0) {
for (int i = 0; i < numElements; i++) {
// Generate random floats between 0 and 1 for the input data
cpu_idata[i] = ((float) rand() / RAND_MAX);
//Compare data at index to data minimum, if less than current minimum, set that element as new minimum
datamin = min(cpu_idata[i], datamin);
//Same as above but for maximum
datamax = max(cpu_idata[i], datamax);
}
} else {
FILE *fp;
fp = fopen(argv[1],"r");
for(int i = 0; i < numElements; i++) {
fscanf(fp,"%f",&cpu_idata[i]);
datamin = min(cpu_idata[i], datamin);
datamax = max(cpu_idata[i],datamax);
}
}
cout << "Sorting on GPU..." << flush;
// GPU Sort
for (int i = 0; i < TEST; i++)
cudaSort(cpu_idata, datamin, datamax, gpu_odata, numElements);
cout << "done.\n";
#ifdef VERIFY
cout << "Sorting on CPU..." << flush;
// CPU Sort
memcpy(cpu_odata, cpu_idata, mem_size);
sdkStartTimer(&cpuTimer);
qsort(cpu_odata, numElements, sizeof(float), compare);
sdkStopTimer(&cpuTimer);
cout << "done.\n";
cout << "Checking result..." << flush;
// Result checking
int count = 0;
for(int i = 0; i < numElements; i++)
if(cpu_odata[i] != gpu_odata[i])
{
printf("Sort missmatch on element %d: \n", i);
printf("CPU = %f : GPU = %f\n", cpu_odata[i], gpu_odata[i]);
count++;
break;
}
if(count == 0) cout << "PASSED.\n";
else cout << "FAILED.\n";
#endif
// Timer report
printf("GPU iterations: %d\n", TEST);
#ifdef TIMER
#ifdef VERIFY
printf("Average CPU execution time: %f ms\n", sdkGetTimerValue(&cpuTimer));
#endif
printf("Average GPU execution time: %f ms\n", sdkGetTimerValue(&totalTimer) / TEST);
printf(" - Upload : %f ms\n", sdkGetTimerValue(&uploadTimer) / TEST);
printf(" - Download : %f ms\n", sdkGetTimerValue(&downloadTimer) / TEST);
printf(" - Bucket sort : %f ms\n", sdkGetTimerValue(&bucketTimer) / TEST);
printf(" - Merge sort : %f ms\n", sdkGetTimerValue(&mergeTimer) / TEST);
#endif
#ifdef OUTPUT
FILE *tp;
const char filename2[]="./hybridoutput.txt";
tp = fopen(filename2,"w");
for(int i = 0; i < numElements; i++) {
fprintf(tp,"%f ",cpu_idata[i]);
}
fclose(tp);
#endif
// Release memory
sdkDeleteTimer(&uploadTimer);
sdkDeleteTimer(&downloadTimer);
sdkDeleteTimer(&bucketTimer);
sdkDeleteTimer(&mergeTimer);
sdkDeleteTimer(&totalTimer);
sdkDeleteTimer(&cpuTimer);
free(cpu_idata); free(cpu_odata); free(gpu_odata);
}
void cudaSort(float *origList, float minimum, float maximum,
float *resultList, int numElements)
{
// Initialization and upload data
float *d_input = NULL;
float *d_output = NULL;
int mem_size = (numElements + DIVISIONS * 4) * sizeof(float);
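	// The extra DIVISIONS * 4 floats leave room to pad each bucket with null elements
	// up to a float4 boundary for the float4-based merge stage (tracked in nullElements).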
sdkStartTimer(&uploadTimer);
{
cudaMalloc((void**) &d_input, mem_size);
cudaMalloc((void**) &d_output, mem_size);
cudaMemcpy((void *) d_input, (void *)origList, numElements * sizeof(float),
cudaMemcpyHostToDevice);
init_bucketsort(numElements);
}
sdkStopTimer(&uploadTimer);
sdkStartTimer(&totalTimer);
// Bucketsort the list
sdkStartTimer(&bucketTimer);
int *sizes = (int*) malloc(DIVISIONS * sizeof(int));
int *nullElements = (int*) malloc(DIVISIONS * sizeof(int));
unsigned int *origOffsets = (unsigned int *) malloc((DIVISIONS + 1) * sizeof(int));
bucketSort(d_input, d_output, numElements, sizes, nullElements,
minimum, maximum, origOffsets);
sdkStopTimer(&bucketTimer);
// Mergesort the result
sdkStartTimer(&mergeTimer);
float4 *d_origList = (float4*) d_output,
*d_resultList = (float4*) d_input;
int newlistsize = 0;
for(int i = 0; i < DIVISIONS; i++)
newlistsize += sizes[i] * 4;
float4 *mergeresult = runMergeSort( newlistsize, DIVISIONS, d_origList, d_resultList,
sizes, nullElements, origOffsets); //d_origList;
cudaThreadSynchronize();
sdkStopTimer(&mergeTimer);
sdkStopTimer(&totalTimer);
// Download result
sdkStartTimer(&downloadTimer);
checkCudaErrors( cudaMemcpy((void *) resultList,
(void *)mergeresult, numElements * sizeof(float), cudaMemcpyDeviceToHost) );
sdkStopTimer(&downloadTimer);
// Clean up
finish_bucketsort();
cudaFree(d_input); cudaFree(d_output);
free(nullElements); free(sizes);
}
|
033eec2c456caf2e0fc8f01e60b5d344055e9fe7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// G5
// Finding number of K-Cliques in an undirected graph
// Find made iterative, one thread per subtree
#include <iostream>
#include <algorithm>
#include <map>
#include <chrono>
#include <assert.h>
// error checking macro
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
using namespace std;
using namespace std::chrono;
// The graph is stored as a CSR-style adjacency list:
// v holds neighbor indices, v_size holds prefix-summed per-vertex offsets.
int *v;
int *v_size;
int n, m, k;
// Computes each node's degree in d and, in v_size, the number of edges whose smaller endpoint is that node
__global__ void degree(int *e1, int *e2, int *d, int *v_size, int m)
{
int idx = threadIdx.x+blockDim.x*blockIdx.x;
if (idx < m)
{
int x = e1[idx], y = e2[idx];
int *dx = &d[x], *dy = &d[y];
atomicAdd(dx,1);
atomicAdd(dy,1);
atomicAdd(&v_size[x],1);
}
}
__global__ void prefix_sum(int *v_size, int n)
{
for(int i = 1; i < n; i++)
{
v_size[i] += v_size[i - 1];
}
}
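// Scatters each edge's larger endpoint y into the CSR neighbor array v:
// v_i[x] is a per-vertex write cursor and v_size[x-1] is vertex x's prefix-summed offset.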
__global__ void adj(int *e1, int *e2, int *v, int *v_i, int *v_size, int m)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < m)
{
int x = e1[idx], y = e2[idx];
// x is smaller than y
int i = atomicAdd(&v_i[x], 1);
        if (x > 0) i += v_size[x - 1]; // vertex 0 starts at offset 0; avoid reading v_size[-1]
assert(i<m);
v[i]=y;
}
}
__global__ void find_iterative(int *d_k, bool *G_linear, int *imp, int *d_imp_size, int *cnt)
{
int rootIdx = (blockIdx.x * blockDim.x) + threadIdx.x;
int imp_size = (*d_imp_size);
if (rootIdx>=imp_size)return;
int k = (*d_k);
int root = imp[rootIdx];
int thread_count=0;
int lvl = 2;
bool** lvl_vertices = (bool**)malloc((k + 1) * sizeof(bool*));
int* num_lvl_vertices = (int*)malloc((k + 1) * sizeof(int));
int* cur_vertex_id = (int*)malloc((k + 1) * sizeof(int));
// The part of G_linear from root*imp_size to root*imp_size + imp_size - 1;
lvl_vertices[lvl] = &(G_linear[rootIdx * imp_size]);
// printf("inside Kernel, root = %d \n", root);
// for(int i = 0; i < imp_size; i++)
// printf("%d ", lvl_vertices[lvl][i]);
// printf("\n");
cur_vertex_id[lvl] = 0;
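    // Iterative DFS with explicit per-level state: lvl_vertices[lvl] is the bitmap of
    // candidates adjacent to every vertex chosen so far, cur_vertex_id[lvl] is the sibling
    // cursor at that depth, and when the next level would be the k-th vertex every surviving
    // candidate completes one k-clique rooted at this thread's vertex.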
while(cur_vertex_id[lvl] < imp_size)
{
// printf("root = %d, lvl = %d, cur_vertex_id = %d \n", root, lvl, cur_vertex_id[lvl]);
if(lvl_vertices[lvl][cur_vertex_id[lvl]] == 0)
{
// printf("Continued from cur_vertex_id = %d \n", cur_vertex_id[lvl]);
cur_vertex_id[lvl]++;
while(cur_vertex_id[lvl] == imp_size && lvl > 2)
{
// Go to parent level
lvl = lvl - 1;
// Go to parent's next sibling
cur_vertex_id[lvl]++;
}
continue;
}
// vertex = imp[cur_vertex_id[lvl]];
// vertex's adjacency list is the part of G_linear from vertex_id * imp_size to vertex_id * imp_size + imp_size - 1
        // adj_vertex is just a view into G_linear's row for this candidate; no separate allocation is needed
        bool* adj_vertex = &(G_linear[cur_vertex_id[lvl] * imp_size]);
// printf("Till now ->\t\t\t");
// for(int i = 0; i < imp_size; i++)
// printf("%d ", lvl_vertices[lvl][i]);
// printf("\n");
// printf("Adjacency list ->\t");
// for(int i = 0; i < imp_size; i++)
// printf("%d ", adj_vertex[i]);
// printf("\n");
        // intersection of adj_vertex[] with lvl_vertices[lvl][]
lvl_vertices[lvl + 1] = (bool*)malloc(imp_size * sizeof(bool));
num_lvl_vertices[lvl + 1] = 0;
for(int i = 0; i < imp_size; i++)
{
lvl_vertices[lvl + 1][i] = (lvl_vertices[lvl][i] & adj_vertex[i]);
if(lvl_vertices[lvl + 1][i] == 1)
{
num_lvl_vertices[lvl + 1]++;
}
}
// printf("Intersection ->\t\t");
// for(int i = 0; i < imp_size; i++)
// printf("%d ", lvl_vertices[lvl + 1][i]);
// printf("\n");
// printf("num_lvl_vertices = %d \n \n", num_lvl_vertices[lvl + 1]);
if(num_lvl_vertices[lvl + 1] > 0 && lvl + 1 < k)
{
lvl++;
cur_vertex_id[lvl] = 0;
}
else
{
if(lvl + 1 == k)
{
thread_count+=num_lvl_vertices[lvl + 1];
}
// Go to next sibling
cur_vertex_id[lvl]++;
while(cur_vertex_id[lvl] == imp_size && lvl > 2)
{
// Go to parent level
lvl = lvl - 1;
// Go to parent's next sibling
cur_vertex_id[lvl]++;
}
}
}
cnt[rootIdx] = thread_count;
//printf("\nTotal count = %d\n--------------------------------------\n\n", thread_count);
}
int main()
{
#ifndef ONLINE_JUDGE
freopen("./input.txt", "r", stdin);
//freopen("output.txt", "w", stdout);
#endif
//--------------------------- INPUT Starts ----------------------------->
// First line of input should contain number of edges m and size of clique k.
scanf("%d %d", &m, &k);
n = 0;
// map to remove duplicate edges
map<pair<int,int>,int> mp;
for(int i=0; i<m; i++)
{
int x,y;
scanf("%d %d", &x, &y);
        // x must be smaller than y
if(x > y) swap(x,y);
if(x != y) mp[{x,y}] = 1;
n = max(n, y);
}
n++;
m = mp.size();
// Storing unique edges in e1[i] - e2[i]
int *e1 = (int*) malloc(m * sizeof(int));
int *e2 = (int*) malloc(m * sizeof(int));
int i = 0;
for(auto x: mp)
{
e1[i] = x.first.first;
e2[i] = x.first.second;
i++;
}
// edges in device
int *d_e1, *d_e2;
hipMalloc(&d_e1, m*sizeof(int));
hipMalloc(&d_e2, m*sizeof(int));
cudaCheckErrors("hipMalloc edges failure");
hipMemcpy(d_e1, e1, m*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_e2, e2, m*sizeof(int), hipMemcpyHostToDevice);
cudaCheckErrors("hipMemcpy edges failure");
//--------------------------- INPUT Ends ------------------------------->
//------------------------ ALGORITHM Starts ---------------------------->
// Start Time
auto start_time = high_resolution_clock::now();
// degree of nodes in device
int *d_d, *d_v_size;
hipMalloc(&d_d, n*sizeof(int));
hipMalloc(&d_v_size, n*sizeof(int));
cudaCheckErrors("hipMalloc degree failure");
hipMemset(d_d, 0, n*sizeof(int));
hipMemset(d_v_size, 0, n*sizeof(int));
cudaCheckErrors("hipMemset degree failure");
int deg_block_sz = 256;
hipLaunchKernelGGL(( degree), dim3((m+deg_block_sz-1)/deg_block_sz), dim3(deg_block_sz), 0, 0, d_e1, d_e2, d_d, d_v_size, m);
cudaCheckErrors("Kernel degree launch failure");
hipLaunchKernelGGL(( prefix_sum), dim3(1),dim3(1), 0, 0, d_v_size, n);
cudaCheckErrors("Kernel prefix_sum launch failure");
int d[n];
v_size = (int*) malloc(n * sizeof(int));
hipMemcpy(d, d_d, n*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(v_size, d_v_size, n*sizeof(int), hipMemcpyDeviceToHost);
cudaCheckErrors("hipMemcpy degree failure");
int *d_v, *d_v_i;
hipMalloc(&d_v, m*sizeof(int));
hipMalloc(&d_v_i, n*sizeof(int));
cudaCheckErrors("hipMalloc adjacency_matrix failure");
hipMemset(d_v_i, 0, n*sizeof(int));
cudaCheckErrors("hipMemset adjacency_matrix failure");
hipLaunchKernelGGL(( adj), dim3((m+deg_block_sz-1)/deg_block_sz), dim3(deg_block_sz), 0, 0, d_e1, d_e2, d_v, d_v_i, d_v_size, m);
cudaCheckErrors("Kernel adjacency_matrix launch failure");
v = (int*) malloc(m * sizeof(int));
hipMemcpy(v, d_v, m*sizeof(int), hipMemcpyDeviceToHost);
cudaCheckErrors("hipMemcpy adjacency_matrix failure");
// cout<<"printing degrees\n";
// for(int i=0;i<n;i++)
// {
// cout<<i<<" "<<d[i]<<"\n";
// }
// cout<<"\n";
    // Only nodes with degree >= k - 1 can be part of a k-clique.
int imp_size = 0;
for(int i = 0; i < n; i++)
{
if(d[i] >= k - 1)
imp_size++;
}
int *imp = (int*) calloc(imp_size, sizeof(int));
for(int i = 0, j = 0; i < n; i++)
{
if(d[i] >= k - 1)
{
imp[j] = i;
j++;
}
}
// cout<<"Printing important vec of size = "<< imp_size<<endl;
// for(int i=0;i<imp_size;i++)
// {
// cout<<imp[i]<<" ";
// }
// cout<<"\n\n";
/*
need imp, imp_size, k, i, cnt, v, v_size in gpu memory
d_v_size and d_v are already in gpu memory
remaining imp, imp_size, k, i and cnt
*/
int *cnt = (int*)malloc(imp_size*sizeof(int));
int *d_imp, *d_imp_size, *d_k, *d_cnt;
hipMalloc(&d_imp, imp_size*sizeof(int));
hipMalloc(&d_imp_size, sizeof(int));
hipMalloc(&d_k, sizeof(int));
hipMalloc(&d_cnt, imp_size*sizeof(int));
cudaCheckErrors("hipMalloc failure");
hipMemset(d_cnt, 0, imp_size*sizeof(int));
cudaCheckErrors("hipMemset failure");
hipMemcpy(d_imp, imp, imp_size*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_imp_size, &imp_size, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_k, &k, sizeof(int), hipMemcpyHostToDevice);
cudaCheckErrors("hipMemcpy failure");
    // Build G_linear: a dense imp_size x imp_size adjacency bitmap over the important vertices.
    // Row i marks neighbors j of imp[i] with imp[i] < imp[j] (edges are stored smaller-to-larger),
    // so every k-clique is enumerated exactly once, rooted at its smallest vertex.
bool *G_linear = (bool*)malloc(imp_size * imp_size * sizeof(bool));
for(int i = 0; i < imp_size; i++)
{
int vertex1 = imp[i];
for(int j = 0; j < imp_size; j++)
{
int vertex2 = imp[j];
G_linear[i*imp_size + j] = (mp.find({vertex1, vertex2}) != mp.end());
}
}
// printf("Printing G_linear \n");
// for(int i = 0; i < imp_size * imp_size; i++)
// {
// if(i%imp_size==0)cout<<"\n";
// printf("%d ", G_linear[i]);
// }
// printf("\n \n");
// storing G_linear in gpu
bool *d_G_linear;
hipMalloc(&d_G_linear, imp_size * imp_size * sizeof(bool));
cudaCheckErrors("hipMalloc G_linear failure");
hipMemcpy(d_G_linear, G_linear, imp_size * imp_size * sizeof(bool), hipMemcpyHostToDevice);
cudaCheckErrors("hipMemcpy G_linear failure");
// cout<<"start "<<(imp_size+1023)/1024<<endl;
hipLaunchKernelGGL(( find_iterative), dim3((imp_size+1023)/1024),dim3(1024), 0, 0, d_k, d_G_linear, d_imp, d_imp_size, d_cnt);
    // Wait for the (asynchronous) kernel launch to finish before taking the end time
    hipDeviceSynchronize();
    // End Time
    auto end_time = high_resolution_clock::now();
//------------------------ ALGORITHM Ends ---------------------------->
//------------------------ OUTPUT Starts ----------------------------->
hipMemcpy(cnt, d_cnt, imp_size*sizeof(int), hipMemcpyDeviceToHost);
long long ans=0;
    // Sum the per-root clique counts over all important vertices
    for(int i=0;i<imp_size;i++)
{
ans+=cnt[i];
// cout<<cnt[i]<<"\n";
}
// Calculating time duration.
auto duration = duration_cast<microseconds> (end_time - start_time);
float time_us = duration.count();
float time_ms = (float) duration.count() / 1000;
float time_s = (float) duration.count() / 1000000;
printf("%ld \n", ans);
printf("Time Taken -> \n");
printf("%.3f seconds \n", time_s);
printf("%.3f milliseconds \n", time_ms);
printf("%.3f microseconds \n", time_us);
//------------------------- OUTPUT Ends ------------------------------>
} | 033eec2c456caf2e0fc8f01e60b5d344055e9fe7.cu | // G5
// Finding number of K-Cliques in an undirected graph
// Find made iterative, one thread per subtree
#include <iostream>
#include <algorithm>
#include <map>
#include <chrono>
#include <assert.h>
// error checking macro
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
using namespace std;
using namespace std::chrono;
// The graph is stored as a CSR-style adjacency list:
// v holds neighbor indices, v_size holds prefix-summed per-vertex offsets.
int *v;
int *v_size;
int n, m, k;
// Computes each node's degree in d and, in v_size, the number of edges whose smaller endpoint is that node
__global__ void degree(int *e1, int *e2, int *d, int *v_size, int m)
{
int idx = threadIdx.x+blockDim.x*blockIdx.x;
if (idx < m)
{
int x = e1[idx], y = e2[idx];
int *dx = &d[x], *dy = &d[y];
atomicAdd(dx,1);
atomicAdd(dy,1);
atomicAdd(&v_size[x],1);
}
}
__global__ void prefix_sum(int *v_size, int n)
{
for(int i = 1; i < n; i++)
{
v_size[i] += v_size[i - 1];
}
}
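// Scatters each edge's larger endpoint y into the CSR neighbor array v:
// v_i[x] is a per-vertex write cursor and v_size[x-1] is vertex x's prefix-summed offset.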
__global__ void adj(int *e1, int *e2, int *v, int *v_i, int *v_size, int m)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < m)
{
int x = e1[idx], y = e2[idx];
// x is smaller than y
int i = atomicAdd(&v_i[x], 1);
        if (x > 0) i += v_size[x - 1]; // vertex 0 starts at offset 0; avoid reading v_size[-1]
assert(i<m);
v[i]=y;
}
}
__global__ void find_iterative(int *d_k, bool *G_linear, int *imp, int *d_imp_size, int *cnt)
{
int rootIdx = (blockIdx.x * blockDim.x) + threadIdx.x;
int imp_size = (*d_imp_size);
if (rootIdx>=imp_size)return;
int k = (*d_k);
int root = imp[rootIdx];
int thread_count=0;
int lvl = 2;
bool** lvl_vertices = (bool**)malloc((k + 1) * sizeof(bool*));
int* num_lvl_vertices = (int*)malloc((k + 1) * sizeof(int));
int* cur_vertex_id = (int*)malloc((k + 1) * sizeof(int));
// The part of G_linear from root*imp_size to root*imp_size + imp_size - 1;
lvl_vertices[lvl] = &(G_linear[rootIdx * imp_size]);
// printf("inside Kernel, root = %d \n", root);
// for(int i = 0; i < imp_size; i++)
// printf("%d ", lvl_vertices[lvl][i]);
// printf("\n");
cur_vertex_id[lvl] = 0;
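    // Iterative DFS with explicit per-level state: lvl_vertices[lvl] is the bitmap of
    // candidates adjacent to every vertex chosen so far, cur_vertex_id[lvl] is the sibling
    // cursor at that depth, and when the next level would be the k-th vertex every surviving
    // candidate completes one k-clique rooted at this thread's vertex.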
while(cur_vertex_id[lvl] < imp_size)
{
// printf("root = %d, lvl = %d, cur_vertex_id = %d \n", root, lvl, cur_vertex_id[lvl]);
if(lvl_vertices[lvl][cur_vertex_id[lvl]] == 0)
{
// printf("Continued from cur_vertex_id = %d \n", cur_vertex_id[lvl]);
cur_vertex_id[lvl]++;
while(cur_vertex_id[lvl] == imp_size && lvl > 2)
{
// Go to parent level
lvl = lvl - 1;
// Go to parent's next sibling
cur_vertex_id[lvl]++;
}
continue;
}
// vertex = imp[cur_vertex_id[lvl]];
// vertex's adjacency list is the part of G_linear from vertex_id * imp_size to vertex_id * imp_size + imp_size - 1
        // adj_vertex is just a view into G_linear's row for this candidate; no separate allocation is needed
        bool* adj_vertex = &(G_linear[cur_vertex_id[lvl] * imp_size]);
// printf("Till now ->\t\t\t");
// for(int i = 0; i < imp_size; i++)
// printf("%d ", lvl_vertices[lvl][i]);
// printf("\n");
// printf("Adjacency list ->\t");
// for(int i = 0; i < imp_size; i++)
// printf("%d ", adj_vertex[i]);
// printf("\n");
        // intersection of adj_vertex[] with lvl_vertices[lvl][]
lvl_vertices[lvl + 1] = (bool*)malloc(imp_size * sizeof(bool));
num_lvl_vertices[lvl + 1] = 0;
for(int i = 0; i < imp_size; i++)
{
lvl_vertices[lvl + 1][i] = (lvl_vertices[lvl][i] & adj_vertex[i]);
if(lvl_vertices[lvl + 1][i] == 1)
{
num_lvl_vertices[lvl + 1]++;
}
}
// printf("Intersection ->\t\t");
// for(int i = 0; i < imp_size; i++)
// printf("%d ", lvl_vertices[lvl + 1][i]);
// printf("\n");
// printf("num_lvl_vertices = %d \n \n", num_lvl_vertices[lvl + 1]);
if(num_lvl_vertices[lvl + 1] > 0 && lvl + 1 < k)
{
lvl++;
cur_vertex_id[lvl] = 0;
}
else
{
if(lvl + 1 == k)
{
thread_count+=num_lvl_vertices[lvl + 1];
}
// Go to next sibling
cur_vertex_id[lvl]++;
while(cur_vertex_id[lvl] == imp_size && lvl > 2)
{
// Go to parent level
lvl = lvl - 1;
// Go to parent's next sibling
cur_vertex_id[lvl]++;
}
}
}
cnt[rootIdx] = thread_count;
//printf("\nTotal count = %d\n--------------------------------------\n\n", thread_count);
}
int main()
{
#ifndef ONLINE_JUDGE
freopen("./input.txt", "r", stdin);
//freopen("output.txt", "w", stdout);
#endif
//--------------------------- INPUT Starts ----------------------------->
// First line of input should contain number of edges m and size of clique k.
scanf("%d %d", &m, &k);
n = 0;
// map to remove duplicate edges
map<pair<int,int>,int> mp;
for(int i=0; i<m; i++)
{
int x,y;
scanf("%d %d", &x, &y);
        // x must be smaller than y
if(x > y) swap(x,y);
if(x != y) mp[{x,y}] = 1;
n = max(n, y);
}
n++;
m = mp.size();
// Storing unique edges in e1[i] - e2[i]
int *e1 = (int*) malloc(m * sizeof(int));
int *e2 = (int*) malloc(m * sizeof(int));
int i = 0;
for(auto x: mp)
{
e1[i] = x.first.first;
e2[i] = x.first.second;
i++;
}
// edges in device
int *d_e1, *d_e2;
cudaMalloc(&d_e1, m*sizeof(int));
cudaMalloc(&d_e2, m*sizeof(int));
cudaCheckErrors("cudaMalloc edges failure");
cudaMemcpy(d_e1, e1, m*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_e2, e2, m*sizeof(int), cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy edges failure");
//--------------------------- INPUT Ends ------------------------------->
//------------------------ ALGORITHM Starts ---------------------------->
// Start Time
auto start_time = high_resolution_clock::now();
// degree of nodes in device
int *d_d, *d_v_size;
cudaMalloc(&d_d, n*sizeof(int));
cudaMalloc(&d_v_size, n*sizeof(int));
cudaCheckErrors("cudaMalloc degree failure");
cudaMemset(d_d, 0, n*sizeof(int));
cudaMemset(d_v_size, 0, n*sizeof(int));
cudaCheckErrors("cudaMemset degree failure");
int deg_block_sz = 256;
degree<<<(m+deg_block_sz-1)/deg_block_sz, deg_block_sz>>>(d_e1, d_e2, d_d, d_v_size, m);
cudaCheckErrors("Kernel degree launch failure");
prefix_sum<<<1,1>>>(d_v_size, n);
cudaCheckErrors("Kernel prefix_sum launch failure");
int d[n];
v_size = (int*) malloc(n * sizeof(int));
cudaMemcpy(d, d_d, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(v_size, d_v_size, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy degree failure");
int *d_v, *d_v_i;
cudaMalloc(&d_v, m*sizeof(int));
cudaMalloc(&d_v_i, n*sizeof(int));
cudaCheckErrors("cudaMalloc adjacency_matrix failure");
cudaMemset(d_v_i, 0, n*sizeof(int));
cudaCheckErrors("cudaMemset adjacency_matrix failure");
adj<<<(m+deg_block_sz-1)/deg_block_sz, deg_block_sz>>>(d_e1, d_e2, d_v, d_v_i, d_v_size, m);
cudaCheckErrors("Kernel adjacency_matrix launch failure");
v = (int*) malloc(m * sizeof(int));
cudaMemcpy(v, d_v, m*sizeof(int), cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy adjacency_matrix failure");
// cout<<"printing degrees\n";
// for(int i=0;i<n;i++)
// {
// cout<<i<<" "<<d[i]<<"\n";
// }
// cout<<"\n";
    // Only nodes with degree >= k - 1 can be part of a k-clique.
int imp_size = 0;
for(int i = 0; i < n; i++)
{
if(d[i] >= k - 1)
imp_size++;
}
int *imp = (int*) calloc(imp_size, sizeof(int));
for(int i = 0, j = 0; i < n; i++)
{
if(d[i] >= k - 1)
{
imp[j] = i;
j++;
}
}
// cout<<"Printing important vec of size = "<< imp_size<<endl;
// for(int i=0;i<imp_size;i++)
// {
// cout<<imp[i]<<" ";
// }
// cout<<"\n\n";
/*
need imp, imp_size, k, i, cnt, v, v_size in gpu memory
d_v_size and d_v are already in gpu memory
remaining imp, imp_size, k, i and cnt
*/
int *cnt = (int*)malloc(imp_size*sizeof(int));
int *d_imp, *d_imp_size, *d_k, *d_cnt;
cudaMalloc(&d_imp, imp_size*sizeof(int));
cudaMalloc(&d_imp_size, sizeof(int));
cudaMalloc(&d_k, sizeof(int));
cudaMalloc(&d_cnt, imp_size*sizeof(int));
cudaCheckErrors("cudaMalloc failure");
cudaMemset(d_cnt, 0, imp_size*sizeof(int));
cudaCheckErrors("cudaMemset failure");
cudaMemcpy(d_imp, imp, imp_size*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_imp_size, &imp_size, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_k, &k, sizeof(int), cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy failure");
    // Build G_linear: a dense imp_size x imp_size adjacency bitmap over the important vertices.
    // Row i marks neighbors j of imp[i] with imp[i] < imp[j] (edges are stored smaller-to-larger),
    // so every k-clique is enumerated exactly once, rooted at its smallest vertex.
bool *G_linear = (bool*)malloc(imp_size * imp_size * sizeof(bool));
for(int i = 0; i < imp_size; i++)
{
int vertex1 = imp[i];
for(int j = 0; j < imp_size; j++)
{
int vertex2 = imp[j];
G_linear[i*imp_size + j] = (mp.find({vertex1, vertex2}) != mp.end());
}
}
// printf("Printing G_linear \n");
// for(int i = 0; i < imp_size * imp_size; i++)
// {
// if(i%imp_size==0)cout<<"\n";
// printf("%d ", G_linear[i]);
// }
// printf("\n \n");
// storing G_linear in gpu
bool *d_G_linear;
cudaMalloc(&d_G_linear, imp_size * imp_size * sizeof(bool));
cudaCheckErrors("cudaMalloc G_linear failure");
cudaMemcpy(d_G_linear, G_linear, imp_size * imp_size * sizeof(bool), cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy G_linear failure");
// cout<<"start "<<(imp_size+1023)/1024<<endl;
find_iterative<<<(imp_size+1023)/1024,1024>>>(d_k, d_G_linear, d_imp, d_imp_size, d_cnt);
    // Wait for the (asynchronous) kernel launch to finish before taking the end time
    cudaDeviceSynchronize();
    // End Time
    auto end_time = high_resolution_clock::now();
//------------------------ ALGORITHM Ends ---------------------------->
//------------------------ OUTPUT Starts ----------------------------->
cudaMemcpy(cnt, d_cnt, imp_size*sizeof(int), cudaMemcpyDeviceToHost);
long long ans=0;
    // Sum the per-root clique counts over all important vertices
    for(int i=0;i<imp_size;i++)
{
ans+=cnt[i];
// cout<<cnt[i]<<"\n";
}
// Calculating time duration.
auto duration = duration_cast<microseconds> (end_time - start_time);
float time_us = duration.count();
float time_ms = (float) duration.count() / 1000;
float time_s = (float) duration.count() / 1000000;
printf("%ld \n", ans);
printf("Time Taken -> \n");
printf("%.3f seconds \n", time_s);
printf("%.3f milliseconds \n", time_ms);
printf("%.3f microseconds \n", time_us);
//------------------------- OUTPUT Ends ------------------------------>
} |
71761ee089436a1c2d704a02c748b460fb8011ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__
void solve_jit_flipped(double *rateConst, double *state, double *deriv, int numcell)
{
size_t tid;
double rate;
tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numcell) {
rate = rateConst[tid+1000000*0];
rate *= state[tid+1000000*8];
rate *= state[tid+1000000*8];
rate *= state[tid+1000000*18];
deriv[tid+1000000*8] -= rate;
deriv[tid+1000000*8] -= rate;
deriv[tid+1000000*18] -= rate;
deriv[tid+1000000*14] += rate;
rate = rateConst[tid+1000000*1];
rate *= state[tid+1000000*5];
rate *= state[tid+1000000*15];
rate *= state[tid+1000000*4];
deriv[tid+1000000*5] -= rate;
deriv[tid+1000000*15] -= rate;
deriv[tid+1000000*4] -= rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*4] += rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*12] += rate;
rate = rateConst[tid+1000000*2];
rate *= state[tid+1000000*10];
rate *= state[tid+1000000*14];
rate *= state[tid+1000000*10];
deriv[tid+1000000*10] -= rate;
deriv[tid+1000000*14] -= rate;
deriv[tid+1000000*10] -= rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*7] += rate;
deriv[tid+1000000*0] += rate;
rate = rateConst[tid+1000000*3];
rate *= state[tid+1000000*14];
rate *= state[tid+1000000*7];
deriv[tid+1000000*14] -= rate;
deriv[tid+1000000*7] -= rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*13] += rate;
rate = rateConst[tid+1000000*4];
rate *= state[tid+1000000*5];
rate *= state[tid+1000000*16];
deriv[tid+1000000*5] -= rate;
deriv[tid+1000000*16] -= rate;
deriv[tid+1000000*5] += rate;
rate = rateConst[tid+1000000*5];
rate *= state[tid+1000000*11];
rate *= state[tid+1000000*8];
deriv[tid+1000000*11] -= rate;
deriv[tid+1000000*8] -= rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*19] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*19] += rate;
rate = rateConst[tid+1000000*6];
rate *= state[tid+1000000*16];
rate *= state[tid+1000000*1];
rate *= state[tid+1000000*14];
deriv[tid+1000000*16] -= rate;
deriv[tid+1000000*1] -= rate;
deriv[tid+1000000*14] -= rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*17] += rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*8] += rate;
rate = rateConst[tid+1000000*7];
rate *= state[tid+1000000*12];
rate *= state[tid+1000000*8];
deriv[tid+1000000*12] -= rate;
deriv[tid+1000000*8] -= rate;
deriv[tid+1000000*0] += rate;
deriv[tid+1000000*0] += rate;
deriv[tid+1000000*6] += rate;
deriv[tid+1000000*19] += rate;
rate = rateConst[tid+1000000*8];
rate *= state[tid+1000000*7];
rate *= state[tid+1000000*12];
deriv[tid+1000000*7] -= rate;
deriv[tid+1000000*12] -= rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*12] += rate;
rate = rateConst[tid+1000000*9];
rate *= state[tid+1000000*0];
rate *= state[tid+1000000*3];
deriv[tid+1000000*0] -= rate;
deriv[tid+1000000*3] -= rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*19] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*19] += rate;
rate = rateConst[tid+1000000*10];
rate *= state[tid+1000000*3];
rate *= state[tid+1000000*5];
rate *= state[tid+1000000*18];
deriv[tid+1000000*3] -= rate;
deriv[tid+1000000*5] -= rate;
deriv[tid+1000000*18] -= rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*17] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*17] += rate;
deriv[tid+1000000*10] += rate;
rate = rateConst[tid+1000000*11];
rate *= state[tid+1000000*18];
rate *= state[tid+1000000*19];
deriv[tid+1000000*18] -= rate;
deriv[tid+1000000*19] -= rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*7] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*6] += rate;
rate = rateConst[tid+1000000*12];
rate *= state[tid+1000000*5];
rate *= state[tid+1000000*0];
rate *= state[tid+1000000*14];
deriv[tid+1000000*5] -= rate;
deriv[tid+1000000*0] -= rate;
deriv[tid+1000000*14] -= rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*9] += rate;
rate = rateConst[tid+1000000*13];
rate *= state[tid+1000000*15];
rate *= state[tid+1000000*11];
deriv[tid+1000000*15] -= rate;
deriv[tid+1000000*11] -= rate;
deriv[tid+1000000*4] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*19] += rate;
deriv[tid+1000000*7] += rate;
deriv[tid+1000000*16] += rate;
rate = rateConst[tid+1000000*14];
rate *= state[tid+1000000*7];
rate *= state[tid+1000000*14];
rate *= state[tid+1000000*6];
deriv[tid+1000000*7] -= rate;
deriv[tid+1000000*14] -= rate;
deriv[tid+1000000*6] -= rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*19] += rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*5] += rate;
rate = rateConst[tid+1000000*15];
rate *= state[tid+1000000*2];
rate *= state[tid+1000000*4];
rate *= state[tid+1000000*10];
deriv[tid+1000000*2] -= rate;
deriv[tid+1000000*4] -= rate;
deriv[tid+1000000*10] -= rate;
deriv[tid+1000000*2] += rate;
rate = rateConst[tid+1000000*16];
rate *= state[tid+1000000*6];
rate *= state[tid+1000000*2];
rate *= state[tid+1000000*2];
deriv[tid+1000000*6] -= rate;
deriv[tid+1000000*2] -= rate;
deriv[tid+1000000*2] -= rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*13] += rate;
rate = rateConst[tid+1000000*17];
rate *= state[tid+1000000*13];
rate *= state[tid+1000000*15];
rate *= state[tid+1000000*1];
deriv[tid+1000000*13] -= rate;
deriv[tid+1000000*15] -= rate;
deriv[tid+1000000*1] -= rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*17] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*2] += rate;
rate = rateConst[tid+1000000*18];
rate *= state[tid+1000000*19];
rate *= state[tid+1000000*10];
rate *= state[tid+1000000*6];
deriv[tid+1000000*19] -= rate;
deriv[tid+1000000*10] -= rate;
deriv[tid+1000000*6] -= rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*7] += rate;
deriv[tid+1000000*17] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*18] += rate;
rate = rateConst[tid+1000000*19];
rate *= state[tid+1000000*12];
rate *= state[tid+1000000*4];
deriv[tid+1000000*12] -= rate;
deriv[tid+1000000*4] -= rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*6] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*11] += rate;
rate = rateConst[tid+1000000*20];
rate *= state[tid+1000000*2];
rate *= state[tid+1000000*18];
rate *= state[tid+1000000*2];
deriv[tid+1000000*2] -= rate;
deriv[tid+1000000*18] -= rate;
deriv[tid+1000000*2] -= rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*19] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*0] += rate;
rate = rateConst[tid+1000000*21];
rate *= state[tid+1000000*17];
rate *= state[tid+1000000*17];
rate *= state[tid+1000000*7];
deriv[tid+1000000*17] -= rate;
deriv[tid+1000000*17] -= rate;
deriv[tid+1000000*7] -= rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*6] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*1] += rate;
rate = rateConst[tid+1000000*22];
rate *= state[tid+1000000*15];
rate *= state[tid+1000000*15];
rate *= state[tid+1000000*15];
deriv[tid+1000000*15] -= rate;
deriv[tid+1000000*15] -= rate;
deriv[tid+1000000*15] -= rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*6] += rate;
deriv[tid+1000000*19] += rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*10] += rate;
rate = rateConst[tid+1000000*23];
rate *= state[tid+1000000*3];
rate *= state[tid+1000000*17];
deriv[tid+1000000*3] -= rate;
deriv[tid+1000000*17] -= rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*0] += rate;
rate = rateConst[tid+1000000*24];
rate *= state[tid+1000000*3];
rate *= state[tid+1000000*17];
deriv[tid+1000000*3] -= rate;
deriv[tid+1000000*17] -= rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*19] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*4] += rate;
rate = rateConst[tid+1000000*25];
rate *= state[tid+1000000*5];
rate *= state[tid+1000000*3];
deriv[tid+1000000*5] -= rate;
deriv[tid+1000000*3] -= rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*7] += rate;
deriv[tid+1000000*5] += rate;
rate = rateConst[tid+1000000*26];
rate *= state[tid+1000000*2];
rate *= state[tid+1000000*8];
deriv[tid+1000000*2] -= rate;
deriv[tid+1000000*8] -= rate;
deriv[tid+1000000*2] += rate;
rate = rateConst[tid+1000000*27];
rate *= state[tid+1000000*6];
rate *= state[tid+1000000*18];
rate *= state[tid+1000000*3];
deriv[tid+1000000*6] -= rate;
deriv[tid+1000000*18] -= rate;
deriv[tid+1000000*3] -= rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*17] += rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*6] += rate;
deriv[tid+1000000*12] += rate;
rate = rateConst[tid+1000000*28];
rate *= state[tid+1000000*15];
rate *= state[tid+1000000*10];
rate *= state[tid+1000000*17];
deriv[tid+1000000*15] -= rate;
deriv[tid+1000000*10] -= rate;
deriv[tid+1000000*17] -= rate;
deriv[tid+1000000*7] += rate;
deriv[tid+1000000*9] += rate;
rate = rateConst[tid+1000000*29];
rate *= state[tid+1000000*9];
rate *= state[tid+1000000*2];
deriv[tid+1000000*9] -= rate;
deriv[tid+1000000*2] -= rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*4] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*10] += rate;
rate = rateConst[tid+1000000*30];
rate *= state[tid+1000000*10];
rate *= state[tid+1000000*5];
rate *= state[tid+1000000*14];
deriv[tid+1000000*10] -= rate;
deriv[tid+1000000*5] -= rate;
deriv[tid+1000000*14] -= rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*0] += rate;
deriv[tid+1000000*0] += rate;
deriv[tid+1000000*0] += rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*15] += rate;
rate = rateConst[tid+1000000*31];
rate *= state[tid+1000000*19];
rate *= state[tid+1000000*5];
rate *= state[tid+1000000*19];
deriv[tid+1000000*19] -= rate;
deriv[tid+1000000*5] -= rate;
deriv[tid+1000000*19] -= rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*6] += rate;
deriv[tid+1000000*17] += rate;
rate = rateConst[tid+1000000*32];
rate *= state[tid+1000000*14];
rate *= state[tid+1000000*18];
deriv[tid+1000000*14] -= rate;
deriv[tid+1000000*18] -= rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*0] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*1] += rate;
rate = rateConst[tid+1000000*33];
rate *= state[tid+1000000*13];
rate *= state[tid+1000000*3];
rate *= state[tid+1000000*13];
deriv[tid+1000000*13] -= rate;
deriv[tid+1000000*3] -= rate;
deriv[tid+1000000*13] -= rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*19] += rate;
rate = rateConst[tid+1000000*34];
rate *= state[tid+1000000*12];
rate *= state[tid+1000000*17];
rate *= state[tid+1000000*7];
deriv[tid+1000000*12] -= rate;
deriv[tid+1000000*17] -= rate;
deriv[tid+1000000*7] -= rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*4] += rate;
rate = rateConst[tid+1000000*35];
rate *= state[tid+1000000*2];
rate *= state[tid+1000000*12];
rate *= state[tid+1000000*3];
deriv[tid+1000000*2] -= rate;
deriv[tid+1000000*12] -= rate;
deriv[tid+1000000*3] -= rate;
deriv[tid+1000000*9] += rate;
rate = rateConst[tid+1000000*36];
rate *= state[tid+1000000*10];
rate *= state[tid+1000000*10];
deriv[tid+1000000*10] -= rate;
deriv[tid+1000000*10] -= rate;
deriv[tid+1000000*6] += rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*7] += rate;
deriv[tid+1000000*11] += rate;
rate = rateConst[tid+1000000*37];
rate *= state[tid+1000000*8];
rate *= state[tid+1000000*1];
deriv[tid+1000000*8] -= rate;
deriv[tid+1000000*1] -= rate;
deriv[tid+1000000*17] += rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*15] += rate;
rate = rateConst[tid+1000000*38];
rate *= state[tid+1000000*19];
rate *= state[tid+1000000*8];
deriv[tid+1000000*19] -= rate;
deriv[tid+1000000*8] -= rate;
deriv[tid+1000000*0] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*2] += rate;
rate = rateConst[tid+1000000*39];
rate *= state[tid+1000000*7];
rate *= state[tid+1000000*10];
rate *= state[tid+1000000*17];
deriv[tid+1000000*7] -= rate;
deriv[tid+1000000*10] -= rate;
deriv[tid+1000000*17] -= rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*0] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*19] += rate;
rate = rateConst[tid+1000000*40];
rate *= state[tid+1000000*1];
rate *= state[tid+1000000*12];
deriv[tid+1000000*1] -= rate;
deriv[tid+1000000*12] -= rate;
deriv[tid+1000000*3] += rate;
rate = rateConst[tid+1000000*41];
rate *= state[tid+1000000*1];
rate *= state[tid+1000000*1];
rate *= state[tid+1000000*4];
deriv[tid+1000000*1] -= rate;
deriv[tid+1000000*1] -= rate;
deriv[tid+1000000*4] -= rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*17] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*6] += rate;
deriv[tid+1000000*0] += rate;
deriv[tid+1000000*9] += rate;
rate = rateConst[tid+1000000*42];
rate *= state[tid+1000000*7];
rate *= state[tid+1000000*9];
rate *= state[tid+1000000*7];
deriv[tid+1000000*7] -= rate;
deriv[tid+1000000*9] -= rate;
deriv[tid+1000000*7] -= rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*18] += rate;
rate = rateConst[tid+1000000*43];
rate *= state[tid+1000000*13];
rate *= state[tid+1000000*3];
deriv[tid+1000000*13] -= rate;
deriv[tid+1000000*3] -= rate;
deriv[tid+1000000*0] += rate;
rate = rateConst[tid+1000000*44];
rate *= state[tid+1000000*1];
rate *= state[tid+1000000*6];
deriv[tid+1000000*1] -= rate;
deriv[tid+1000000*6] -= rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*15] += rate;
rate = rateConst[tid+1000000*45];
rate *= state[tid+1000000*7];
rate *= state[tid+1000000*11];
deriv[tid+1000000*7] -= rate;
deriv[tid+1000000*11] -= rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*19] += rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*4] += rate;
deriv[tid+1000000*6] += rate;
deriv[tid+1000000*16] += rate;
rate = rateConst[tid+1000000*46];
rate *= state[tid+1000000*1];
rate *= state[tid+1000000*9];
deriv[tid+1000000*1] -= rate;
deriv[tid+1000000*9] -= rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*4] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*6] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*5] += rate;
rate = rateConst[tid+1000000*47];
rate *= state[tid+1000000*3];
rate *= state[tid+1000000*9];
deriv[tid+1000000*3] -= rate;
deriv[tid+1000000*9] -= rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*18] += rate;
rate = rateConst[tid+1000000*48];
rate *= state[tid+1000000*5];
rate *= state[tid+1000000*15];
rate *= state[tid+1000000*8];
deriv[tid+1000000*5] -= rate;
deriv[tid+1000000*15] -= rate;
deriv[tid+1000000*8] -= rate;
deriv[tid+1000000*4] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*18] += rate;
rate = rateConst[tid+1000000*49];
rate *= state[tid+1000000*17];
rate *= state[tid+1000000*11];
deriv[tid+1000000*17] -= rate;
deriv[tid+1000000*11] -= rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*5] += rate;
}
}
| 71761ee089436a1c2d704a02c748b460fb8011ea.cu |
extern "C" __global__
void solve_jit_flipped(double *rateConst, double *state, double *deriv, int numcell)
{
size_t tid;
double rate;
tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numcell) {
rate = rateConst[tid+1000000*0];
rate *= state[tid+1000000*8];
rate *= state[tid+1000000*8];
rate *= state[tid+1000000*18];
deriv[tid+1000000*8] -= rate;
deriv[tid+1000000*8] -= rate;
deriv[tid+1000000*18] -= rate;
deriv[tid+1000000*14] += rate;
rate = rateConst[tid+1000000*1];
rate *= state[tid+1000000*5];
rate *= state[tid+1000000*15];
rate *= state[tid+1000000*4];
deriv[tid+1000000*5] -= rate;
deriv[tid+1000000*15] -= rate;
deriv[tid+1000000*4] -= rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*4] += rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*12] += rate;
rate = rateConst[tid+1000000*2];
rate *= state[tid+1000000*10];
rate *= state[tid+1000000*14];
rate *= state[tid+1000000*10];
deriv[tid+1000000*10] -= rate;
deriv[tid+1000000*14] -= rate;
deriv[tid+1000000*10] -= rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*7] += rate;
deriv[tid+1000000*0] += rate;
rate = rateConst[tid+1000000*3];
rate *= state[tid+1000000*14];
rate *= state[tid+1000000*7];
deriv[tid+1000000*14] -= rate;
deriv[tid+1000000*7] -= rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*13] += rate;
rate = rateConst[tid+1000000*4];
rate *= state[tid+1000000*5];
rate *= state[tid+1000000*16];
deriv[tid+1000000*5] -= rate;
deriv[tid+1000000*16] -= rate;
deriv[tid+1000000*5] += rate;
rate = rateConst[tid+1000000*5];
rate *= state[tid+1000000*11];
rate *= state[tid+1000000*8];
deriv[tid+1000000*11] -= rate;
deriv[tid+1000000*8] -= rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*19] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*19] += rate;
rate = rateConst[tid+1000000*6];
rate *= state[tid+1000000*16];
rate *= state[tid+1000000*1];
rate *= state[tid+1000000*14];
deriv[tid+1000000*16] -= rate;
deriv[tid+1000000*1] -= rate;
deriv[tid+1000000*14] -= rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*17] += rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*8] += rate;
rate = rateConst[tid+1000000*7];
rate *= state[tid+1000000*12];
rate *= state[tid+1000000*8];
deriv[tid+1000000*12] -= rate;
deriv[tid+1000000*8] -= rate;
deriv[tid+1000000*0] += rate;
deriv[tid+1000000*0] += rate;
deriv[tid+1000000*6] += rate;
deriv[tid+1000000*19] += rate;
rate = rateConst[tid+1000000*8];
rate *= state[tid+1000000*7];
rate *= state[tid+1000000*12];
deriv[tid+1000000*7] -= rate;
deriv[tid+1000000*12] -= rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*12] += rate;
rate = rateConst[tid+1000000*9];
rate *= state[tid+1000000*0];
rate *= state[tid+1000000*3];
deriv[tid+1000000*0] -= rate;
deriv[tid+1000000*3] -= rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*19] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*19] += rate;
rate = rateConst[tid+1000000*10];
rate *= state[tid+1000000*3];
rate *= state[tid+1000000*5];
rate *= state[tid+1000000*18];
deriv[tid+1000000*3] -= rate;
deriv[tid+1000000*5] -= rate;
deriv[tid+1000000*18] -= rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*17] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*17] += rate;
deriv[tid+1000000*10] += rate;
rate = rateConst[tid+1000000*11];
rate *= state[tid+1000000*18];
rate *= state[tid+1000000*19];
deriv[tid+1000000*18] -= rate;
deriv[tid+1000000*19] -= rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*7] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*6] += rate;
rate = rateConst[tid+1000000*12];
rate *= state[tid+1000000*5];
rate *= state[tid+1000000*0];
rate *= state[tid+1000000*14];
deriv[tid+1000000*5] -= rate;
deriv[tid+1000000*0] -= rate;
deriv[tid+1000000*14] -= rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*9] += rate;
rate = rateConst[tid+1000000*13];
rate *= state[tid+1000000*15];
rate *= state[tid+1000000*11];
deriv[tid+1000000*15] -= rate;
deriv[tid+1000000*11] -= rate;
deriv[tid+1000000*4] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*19] += rate;
deriv[tid+1000000*7] += rate;
deriv[tid+1000000*16] += rate;
rate = rateConst[tid+1000000*14];
rate *= state[tid+1000000*7];
rate *= state[tid+1000000*14];
rate *= state[tid+1000000*6];
deriv[tid+1000000*7] -= rate;
deriv[tid+1000000*14] -= rate;
deriv[tid+1000000*6] -= rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*19] += rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*5] += rate;
rate = rateConst[tid+1000000*15];
rate *= state[tid+1000000*2];
rate *= state[tid+1000000*4];
rate *= state[tid+1000000*10];
deriv[tid+1000000*2] -= rate;
deriv[tid+1000000*4] -= rate;
deriv[tid+1000000*10] -= rate;
deriv[tid+1000000*2] += rate;
rate = rateConst[tid+1000000*16];
rate *= state[tid+1000000*6];
rate *= state[tid+1000000*2];
rate *= state[tid+1000000*2];
deriv[tid+1000000*6] -= rate;
deriv[tid+1000000*2] -= rate;
deriv[tid+1000000*2] -= rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*13] += rate;
rate = rateConst[tid+1000000*17];
rate *= state[tid+1000000*13];
rate *= state[tid+1000000*15];
rate *= state[tid+1000000*1];
deriv[tid+1000000*13] -= rate;
deriv[tid+1000000*15] -= rate;
deriv[tid+1000000*1] -= rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*17] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*2] += rate;
rate = rateConst[tid+1000000*18];
rate *= state[tid+1000000*19];
rate *= state[tid+1000000*10];
rate *= state[tid+1000000*6];
deriv[tid+1000000*19] -= rate;
deriv[tid+1000000*10] -= rate;
deriv[tid+1000000*6] -= rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*7] += rate;
deriv[tid+1000000*17] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*18] += rate;
rate = rateConst[tid+1000000*19];
rate *= state[tid+1000000*12];
rate *= state[tid+1000000*4];
deriv[tid+1000000*12] -= rate;
deriv[tid+1000000*4] -= rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*6] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*11] += rate;
rate = rateConst[tid+1000000*20];
rate *= state[tid+1000000*2];
rate *= state[tid+1000000*18];
rate *= state[tid+1000000*2];
deriv[tid+1000000*2] -= rate;
deriv[tid+1000000*18] -= rate;
deriv[tid+1000000*2] -= rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*19] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*0] += rate;
rate = rateConst[tid+1000000*21];
rate *= state[tid+1000000*17];
rate *= state[tid+1000000*17];
rate *= state[tid+1000000*7];
deriv[tid+1000000*17] -= rate;
deriv[tid+1000000*17] -= rate;
deriv[tid+1000000*7] -= rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*6] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*1] += rate;
rate = rateConst[tid+1000000*22];
rate *= state[tid+1000000*15];
rate *= state[tid+1000000*15];
rate *= state[tid+1000000*15];
deriv[tid+1000000*15] -= rate;
deriv[tid+1000000*15] -= rate;
deriv[tid+1000000*15] -= rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*6] += rate;
deriv[tid+1000000*19] += rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*10] += rate;
rate = rateConst[tid+1000000*23];
rate *= state[tid+1000000*3];
rate *= state[tid+1000000*17];
deriv[tid+1000000*3] -= rate;
deriv[tid+1000000*17] -= rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*0] += rate;
rate = rateConst[tid+1000000*24];
rate *= state[tid+1000000*3];
rate *= state[tid+1000000*17];
deriv[tid+1000000*3] -= rate;
deriv[tid+1000000*17] -= rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*19] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*4] += rate;
rate = rateConst[tid+1000000*25];
rate *= state[tid+1000000*5];
rate *= state[tid+1000000*3];
deriv[tid+1000000*5] -= rate;
deriv[tid+1000000*3] -= rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*7] += rate;
deriv[tid+1000000*5] += rate;
rate = rateConst[tid+1000000*26];
rate *= state[tid+1000000*2];
rate *= state[tid+1000000*8];
deriv[tid+1000000*2] -= rate;
deriv[tid+1000000*8] -= rate;
deriv[tid+1000000*2] += rate;
rate = rateConst[tid+1000000*27];
rate *= state[tid+1000000*6];
rate *= state[tid+1000000*18];
rate *= state[tid+1000000*3];
deriv[tid+1000000*6] -= rate;
deriv[tid+1000000*18] -= rate;
deriv[tid+1000000*3] -= rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*17] += rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*6] += rate;
deriv[tid+1000000*12] += rate;
rate = rateConst[tid+1000000*28];
rate *= state[tid+1000000*15];
rate *= state[tid+1000000*10];
rate *= state[tid+1000000*17];
deriv[tid+1000000*15] -= rate;
deriv[tid+1000000*10] -= rate;
deriv[tid+1000000*17] -= rate;
deriv[tid+1000000*7] += rate;
deriv[tid+1000000*9] += rate;
rate = rateConst[tid+1000000*29];
rate *= state[tid+1000000*9];
rate *= state[tid+1000000*2];
deriv[tid+1000000*9] -= rate;
deriv[tid+1000000*2] -= rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*4] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*10] += rate;
rate = rateConst[tid+1000000*30];
rate *= state[tid+1000000*10];
rate *= state[tid+1000000*5];
rate *= state[tid+1000000*14];
deriv[tid+1000000*10] -= rate;
deriv[tid+1000000*5] -= rate;
deriv[tid+1000000*14] -= rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*0] += rate;
deriv[tid+1000000*0] += rate;
deriv[tid+1000000*0] += rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*15] += rate;
rate = rateConst[tid+1000000*31];
rate *= state[tid+1000000*19];
rate *= state[tid+1000000*5];
rate *= state[tid+1000000*19];
deriv[tid+1000000*19] -= rate;
deriv[tid+1000000*5] -= rate;
deriv[tid+1000000*19] -= rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*6] += rate;
deriv[tid+1000000*17] += rate;
rate = rateConst[tid+1000000*32];
rate *= state[tid+1000000*14];
rate *= state[tid+1000000*18];
deriv[tid+1000000*14] -= rate;
deriv[tid+1000000*18] -= rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*0] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*1] += rate;
rate = rateConst[tid+1000000*33];
rate *= state[tid+1000000*13];
rate *= state[tid+1000000*3];
rate *= state[tid+1000000*13];
deriv[tid+1000000*13] -= rate;
deriv[tid+1000000*3] -= rate;
deriv[tid+1000000*13] -= rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*14] += rate;
deriv[tid+1000000*19] += rate;
rate = rateConst[tid+1000000*34];
rate *= state[tid+1000000*12];
rate *= state[tid+1000000*17];
rate *= state[tid+1000000*7];
deriv[tid+1000000*12] -= rate;
deriv[tid+1000000*17] -= rate;
deriv[tid+1000000*7] -= rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*4] += rate;
rate = rateConst[tid+1000000*35];
rate *= state[tid+1000000*2];
rate *= state[tid+1000000*12];
rate *= state[tid+1000000*3];
deriv[tid+1000000*2] -= rate;
deriv[tid+1000000*12] -= rate;
deriv[tid+1000000*3] -= rate;
deriv[tid+1000000*9] += rate;
rate = rateConst[tid+1000000*36];
rate *= state[tid+1000000*10];
rate *= state[tid+1000000*10];
deriv[tid+1000000*10] -= rate;
deriv[tid+1000000*10] -= rate;
deriv[tid+1000000*6] += rate;
deriv[tid+1000000*3] += rate;
deriv[tid+1000000*7] += rate;
deriv[tid+1000000*11] += rate;
rate = rateConst[tid+1000000*37];
rate *= state[tid+1000000*8];
rate *= state[tid+1000000*1];
deriv[tid+1000000*8] -= rate;
deriv[tid+1000000*1] -= rate;
deriv[tid+1000000*17] += rate;
deriv[tid+1000000*1] += rate;
deriv[tid+1000000*15] += rate;
rate = rateConst[tid+1000000*38];
rate *= state[tid+1000000*19];
rate *= state[tid+1000000*8];
deriv[tid+1000000*19] -= rate;
deriv[tid+1000000*8] -= rate;
deriv[tid+1000000*0] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*2] += rate;
rate = rateConst[tid+1000000*39];
rate *= state[tid+1000000*7];
rate *= state[tid+1000000*10];
rate *= state[tid+1000000*17];
deriv[tid+1000000*7] -= rate;
deriv[tid+1000000*10] -= rate;
deriv[tid+1000000*17] -= rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*0] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*19] += rate;
rate = rateConst[tid+1000000*40];
rate *= state[tid+1000000*1];
rate *= state[tid+1000000*12];
deriv[tid+1000000*1] -= rate;
deriv[tid+1000000*12] -= rate;
deriv[tid+1000000*3] += rate;
rate = rateConst[tid+1000000*41];
rate *= state[tid+1000000*1];
rate *= state[tid+1000000*1];
rate *= state[tid+1000000*4];
deriv[tid+1000000*1] -= rate;
deriv[tid+1000000*1] -= rate;
deriv[tid+1000000*4] -= rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*17] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*6] += rate;
deriv[tid+1000000*0] += rate;
deriv[tid+1000000*9] += rate;
rate = rateConst[tid+1000000*42];
rate *= state[tid+1000000*7];
rate *= state[tid+1000000*9];
rate *= state[tid+1000000*7];
deriv[tid+1000000*7] -= rate;
deriv[tid+1000000*9] -= rate;
deriv[tid+1000000*7] -= rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*18] += rate;
rate = rateConst[tid+1000000*43];
rate *= state[tid+1000000*13];
rate *= state[tid+1000000*3];
deriv[tid+1000000*13] -= rate;
deriv[tid+1000000*3] -= rate;
deriv[tid+1000000*0] += rate;
rate = rateConst[tid+1000000*44];
rate *= state[tid+1000000*1];
rate *= state[tid+1000000*6];
deriv[tid+1000000*1] -= rate;
deriv[tid+1000000*6] -= rate;
deriv[tid+1000000*13] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*15] += rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*12] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*15] += rate;
rate = rateConst[tid+1000000*45];
rate *= state[tid+1000000*7];
rate *= state[tid+1000000*11];
deriv[tid+1000000*7] -= rate;
deriv[tid+1000000*11] -= rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*19] += rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*4] += rate;
deriv[tid+1000000*6] += rate;
deriv[tid+1000000*16] += rate;
rate = rateConst[tid+1000000*46];
rate *= state[tid+1000000*1];
rate *= state[tid+1000000*9];
deriv[tid+1000000*1] -= rate;
deriv[tid+1000000*9] -= rate;
deriv[tid+1000000*9] += rate;
deriv[tid+1000000*18] += rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*10] += rate;
deriv[tid+1000000*4] += rate;
deriv[tid+1000000*16] += rate;
deriv[tid+1000000*6] += rate;
deriv[tid+1000000*11] += rate;
deriv[tid+1000000*5] += rate;
rate = rateConst[tid+1000000*47];
rate *= state[tid+1000000*3];
rate *= state[tid+1000000*9];
deriv[tid+1000000*3] -= rate;
deriv[tid+1000000*9] -= rate;
deriv[tid+1000000*2] += rate;
deriv[tid+1000000*18] += rate;
rate = rateConst[tid+1000000*48];
rate *= state[tid+1000000*5];
rate *= state[tid+1000000*15];
rate *= state[tid+1000000*8];
deriv[tid+1000000*5] -= rate;
deriv[tid+1000000*15] -= rate;
deriv[tid+1000000*8] -= rate;
deriv[tid+1000000*4] += rate;
deriv[tid+1000000*5] += rate;
deriv[tid+1000000*18] += rate;
rate = rateConst[tid+1000000*49];
rate *= state[tid+1000000*17];
rate *= state[tid+1000000*11];
deriv[tid+1000000*17] -= rate;
deriv[tid+1000000*11] -= rate;
deriv[tid+1000000*8] += rate;
deriv[tid+1000000*5] += rate;
}
}
|
158bb34fcddb9509615fbba7c08940cd3e7ab99b.hip | // !!! This is a file automatically generated by hipify!!!
#include "stereo.h"
Stereo::Stereo() {
this->BlockWidth = 32;
this->BlockHeight = 12;
this->StrideAlignment = 32;
}
Stereo::Stereo(int BlockWidth, int BlockHeight, int StrideAlignment) {
this->BlockWidth = BlockWidth;
this->BlockHeight = BlockHeight;
this->StrideAlignment = StrideAlignment;
}
int Stereo::initializeOpticalFlow(int width, int height, int channels, int inputType, int nLevels, float scale, float lambda,
float theta, float tau, int nWarpIters, int nSolverIters)
{
//allocate all memories
this->width = width;
this->height = height;
this->stride = iAlignUp(width);
this->inputType = inputType;
this->fScale = scale;
this->nLevels = nLevels;
this->inputChannels = channels;
this->nSolverIters = nSolverIters; // number of inner iterations (ROF loop)
this->nWarpIters = nWarpIters;
this->lambda = lambda;
this->theta = theta;
this->tau = tau;
pI0 = std::vector<float*>(nLevels);
pI1 = std::vector<float*>(nLevels);
pW = std::vector<int>(nLevels);
pH = std::vector<int>(nLevels);
pS = std::vector<int>(nLevels);
pDataSize = std::vector<int>(nLevels);
int newHeight = height;
int newWidth = width;
int newStride = iAlignUp(width);
//std::cout << "Pyramid Sizes: " << newWidth << " " << newHeight << " " << newStride << std::endl;
for (int level = 0; level < nLevels; level++) {
pDataSize[level] = newStride * newHeight * sizeof(float);
checkCudaErrors(hipMalloc(&pI0[level], pDataSize[level]));
checkCudaErrors(hipMalloc(&pI1[level], pDataSize[level]));
pW[level] = newWidth;
pH[level] = newHeight;
pS[level] = newStride;
newHeight = newHeight / fScale;
newWidth = newWidth / fScale;
newStride = iAlignUp(newWidth);
}
//runtime
dataSize = stride * height * sizeof(float);
dataSize8uc3 = stride * height * sizeof(uchar3);
dataSize8u = stride * height * sizeof(uchar);
dataSize32f = dataSize;
dataSize32fc3 = dataSize * 3;
checkCudaErrors(hipMalloc(&d_i1warp, dataSize));
checkCudaErrors(hipMalloc(&d_du, dataSize));
checkCudaErrors(hipMalloc(&d_dv, dataSize));
checkCudaErrors(hipMalloc(&d_dus, dataSize));
checkCudaErrors(hipMalloc(&d_dvs, dataSize));
checkCudaErrors(hipMalloc(&d_dumed, dataSize));
checkCudaErrors(hipMalloc(&d_dvmed, dataSize));
checkCudaErrors(hipMalloc(&d_dumeds, dataSize));
checkCudaErrors(hipMalloc(&d_dvmeds, dataSize));
//dual TV
checkCudaErrors(hipMalloc(&d_pu1, dataSize));
checkCudaErrors(hipMalloc(&d_pu2, dataSize));
checkCudaErrors(hipMalloc(&d_pv1, dataSize));
checkCudaErrors(hipMalloc(&d_pv2, dataSize));
//dual TV temps
checkCudaErrors(hipMalloc(&d_pu1s, dataSize));
checkCudaErrors(hipMalloc(&d_pu2s, dataSize));
checkCudaErrors(hipMalloc(&d_pv1s, dataSize));
checkCudaErrors(hipMalloc(&d_pv2s, dataSize));
checkCudaErrors(hipMalloc(&d_Ix, dataSize));
checkCudaErrors(hipMalloc(&d_Iy, dataSize));
checkCudaErrors(hipMalloc(&d_Iz, dataSize));
checkCudaErrors(hipMalloc(&d_u, dataSize));
checkCudaErrors(hipMalloc(&d_v, dataSize));
checkCudaErrors(hipMalloc(&d_us, dataSize));
checkCudaErrors(hipMalloc(&d_vs, dataSize));
if (inputType == CV_8UC3) {
checkCudaErrors(hipMalloc(&d_i08uc3, dataSize8uc3));
checkCudaErrors(hipMalloc(&d_i18uc3, dataSize8uc3));
}
else if (inputType == CV_8U) {
checkCudaErrors(hipMalloc(&d_i08u, dataSize8u));
checkCudaErrors(hipMalloc(&d_i18u, dataSize8u));
}
// colored uv, for display only
checkCudaErrors(hipMalloc(&d_uvrgb, dataSize * 3));
// Output mats
uvrgb = cv::Mat(height, stride, CV_32FC3);
upad = cv::Mat(height, stride, CV_32F);
vpad = cv::Mat(height, stride, CV_32F);
return 0;
}
int Stereo::initializeFisheyeStereo(int width, int height, int channels, int inputType, int nLevels, float scale, float lambda,
float theta, float tau, int nWarpIters, int nSolverIters) {
//allocate all memories
this->width = width;
this->height = height;
this->stride = iAlignUp(width);
this->inputType = inputType;
this->fScale = scale;
this->nLevels = nLevels;
this->inputChannels = channels;
this->nSolverIters = nSolverIters; // number of inner iterations (ROF loop)
this->nWarpIters = nWarpIters;
this->lambda = lambda;
this->theta = theta;
this->tau = tau;
pI0 = std::vector<float*>(nLevels);
pI1 = std::vector<float*>(nLevels);
pW = std::vector<int>(nLevels);
pH = std::vector<int>(nLevels);
pS = std::vector<int>(nLevels);
pDataSize = std::vector<int>(nLevels);
pTvxForward = std::vector<float*>(nLevels);
pTvyForward = std::vector<float*>(nLevels);
pTvxBackward = std::vector<float*>(nLevels);
pTvyBackward = std::vector<float*>(nLevels);
int newHeight = height;
int newWidth = width;
int newStride = iAlignUp(width);
//std::cout << "Pyramid Sizes: " << newWidth << " " << newHeight << " " << newStride << std::endl;
for (int level = 0; level < nLevels; level++) {
pDataSize[level] = newStride * newHeight * sizeof(float);
checkCudaErrors(hipMalloc(&pI0[level], pDataSize[level]));
checkCudaErrors(hipMalloc(&pI1[level], pDataSize[level]));
checkCudaErrors(hipMalloc(&pTvxForward[level], pDataSize[level]));
checkCudaErrors(hipMalloc(&pTvyForward[level], pDataSize[level]));
checkCudaErrors(hipMalloc(&pTvxBackward[level], pDataSize[level]));
checkCudaErrors(hipMalloc(&pTvyBackward[level], pDataSize[level]));
pW[level] = newWidth;
pH[level] = newHeight;
pS[level] = newStride;
newHeight = newHeight / fScale;
newWidth = newWidth / fScale;
newStride = iAlignUp(newWidth);
}
//runtime
dataSize = stride * height * sizeof(float);
dataSize8uc3 = stride * height * sizeof(uchar3);
dataSize8u = stride * height * sizeof(uchar);
dataSize32f = dataSize;
dataSize32fc3 = dataSize * 3;
checkCudaErrors(hipMalloc(&d_i1warp, dataSize));
checkCudaErrors(hipMalloc(&d_tvxForward, dataSize));
checkCudaErrors(hipMalloc(&d_tvyForward, dataSize));
checkCudaErrors(hipMalloc(&d_tvxBackward, dataSize));
checkCudaErrors(hipMalloc(&d_tvyBackward, dataSize));
checkCudaErrors(hipMalloc(&d_tvx2, dataSize));
checkCudaErrors(hipMalloc(&d_tvy2, dataSize));
checkCudaErrors(hipMalloc(&d_cvx, dataSize));
checkCudaErrors(hipMalloc(&d_cvy, dataSize));
checkCudaErrors(hipMalloc(&d_i1calibrated, dataSize));
checkCudaErrors(hipMalloc(&d_Iw, dataSize));
checkCudaErrors(hipMalloc(&d_Iz, dataSize));
checkCudaErrors(hipMalloc(&d_w, dataSize));
checkCudaErrors(hipMalloc(&d_wForward, dataSize));
checkCudaErrors(hipMalloc(&d_wBackward, dataSize));
checkCudaErrors(hipMalloc(&d_wFinal, dataSize));
checkCudaErrors(hipMalloc(&d_u, dataSize));
checkCudaErrors(hipMalloc(&d_v, dataSize));
checkCudaErrors(hipMalloc(&d_uForward, dataSize));
checkCudaErrors(hipMalloc(&d_vForward, dataSize));
checkCudaErrors(hipMalloc(&d_us, dataSize));
checkCudaErrors(hipMalloc(&d_vs, dataSize));
checkCudaErrors(hipMalloc(&d_ws, dataSize));
checkCudaErrors(hipMalloc(&d_du, dataSize));
checkCudaErrors(hipMalloc(&d_dv, dataSize));
checkCudaErrors(hipMalloc(&d_dw, dataSize));
checkCudaErrors(hipMalloc(&d_dws, dataSize));
checkCudaErrors(hipMalloc(&d_depth, dataSize));
checkCudaErrors(hipMalloc(&d_depthFinal, dataSize));
checkCudaErrors(hipMalloc(&d_occlusion, dataSize));
checkCudaErrors(hipMalloc(&d_dwmed, dataSize));
checkCudaErrors(hipMalloc(&d_dwmeds, dataSize));
checkCudaErrors(hipMalloc(&d_pw1, dataSize));
checkCudaErrors(hipMalloc(&d_pw2, dataSize));
checkCudaErrors(hipMalloc(&d_pw1s, dataSize));
checkCudaErrors(hipMalloc(&d_pw2s, dataSize));
if (inputType == CV_8UC3) {
checkCudaErrors(hipMalloc(&d_i08uc3, dataSize8uc3));
checkCudaErrors(hipMalloc(&d_i18uc3, dataSize8uc3));
}
else if (inputType == CV_8U) {
checkCudaErrors(hipMalloc(&d_i08u, dataSize8u));
checkCudaErrors(hipMalloc(&d_i18u, dataSize8u));
}
// Plane sweep
checkCudaErrors(hipMalloc(&ps_i1warp, dataSize));
checkCudaErrors(hipMalloc(&ps_i1warps, dataSize));
checkCudaErrors(hipMalloc(&ps_error, dataSize));
checkCudaErrors(hipMalloc(&ps_depth, dataSize));
checkCudaErrors(hipMalloc(&ps_disparity, dataSize));
checkCudaErrors(hipMalloc(&ps_disparityForward, dataSize));
checkCudaErrors(hipMalloc(&ps_disparityBackward, dataSize));
checkCudaErrors(hipMalloc(&ps_disparityFinal, dataSize));
// Colored uv, for display only
checkCudaErrors(hipMalloc(&d_uvrgb, dataSize * 3));
uvrgb = cv::Mat(height, stride, CV_32FC3);
disparity = cv::Mat(height, stride, CV_32F);
depth = cv::Mat(height, stride, CV_32F);
planeSweepDepth = cv::Mat(height, stride, CV_32F);
return 0;
}
int Stereo::loadVectorFields(cv::Mat translationVector, cv::Mat calibrationVector) {
// Padding
cv::Mat translationVectorPad = cv::Mat(height, stride, CV_32F);
cv::Mat calibrationVectorPad = cv::Mat(height, stride, CV_32F);
cv::copyMakeBorder(translationVector, translationVectorPad, 0, 0, 0, stride - width, cv::BORDER_CONSTANT, 0);
cv::copyMakeBorder(calibrationVector, calibrationVectorPad, 0, 0, 0, stride - width, cv::BORDER_CONSTANT, 0);
// Translation Vector Field
translationVectorX = cv::Mat(height, stride, CV_32F);
translationVectorY = cv::Mat(height, stride, CV_32F);
calibrationVectorX = cv::Mat(height, stride, CV_32F);
calibrationVectorY = cv::Mat(height, stride, CV_32F);
cv::Mat tuv[2];
cv::split(translationVectorPad, tuv);
translationVectorX = tuv[0];
translationVectorY = tuv[1];
checkCudaErrors(hipMemcpy(d_tvxForward, (float *)translationVectorX.ptr(), dataSize32f, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_tvyForward, (float *)translationVectorY.ptr(), dataSize32f, hipMemcpyHostToDevice));
pTvxForward[0] = d_tvxForward;
pTvyForward[0] = d_tvyForward;
ScalarMultiply(d_tvxForward, -1.0f, width, height, stride, d_tvxBackward);
ScalarMultiply(d_tvyForward, -1.0f, width, height, stride, d_tvyBackward);
pTvxBackward[0] = d_tvxBackward;
pTvyBackward[0] = d_tvyBackward;
for (int level = 1; level < nLevels; level++) {
//std::cout << pW[level] << " " << pH[level] << " " << pS[level] << std::endl;
Downscale(pTvxForward[level - 1], pW[level - 1], pH[level - 1], pS[level - 1],
pW[level], pH[level], pS[level], pTvxForward[level]);
Downscale(pTvyForward[level - 1], pW[level - 1], pH[level - 1], pS[level - 1],
pW[level], pH[level], pS[level], pTvyForward[level]);
Downscale(pTvxBackward[level - 1], pW[level - 1], pH[level - 1], pS[level - 1],
pW[level], pH[level], pS[level], pTvxBackward[level]);
Downscale(pTvyBackward[level - 1], pW[level - 1], pH[level - 1], pS[level - 1],
pW[level], pH[level], pS[level], pTvyBackward[level]);
}
// Calibration Vector Field
cv::Mat cuv[2];
cv::split(calibrationVectorPad, cuv);
calibrationVectorX = cuv[0].clone();
calibrationVectorY = cuv[1].clone();
checkCudaErrors(hipMemcpy(d_cvx, (float *)calibrationVectorX.ptr(), dataSize32f, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_cvy, (float *)calibrationVectorY.ptr(), dataSize32f, hipMemcpyHostToDevice));
return 0;
}
int Stereo::copyImagesToDevice(cv::Mat i0, cv::Mat i1) {
// Padding
cv::copyMakeBorder(i0, im0pad, 0, 0, 0, stride - width, cv::BORDER_CONSTANT, 0);
cv::copyMakeBorder(i1, im1pad, 0, 0, 0, stride - width, cv::BORDER_CONSTANT, 0);
if (inputType == CV_8U) {
checkCudaErrors(hipMemcpy(d_i08u, (uchar *)im0pad.ptr(), dataSize8u, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_i18u, (uchar *)im1pad.ptr(), dataSize8u, hipMemcpyHostToDevice));
// Convert to 32F
Cv8uToGray(d_i08u, pI0[0], width, height, stride);
Cv8uToGray(d_i18u, pI1[0], width, height, stride);
}
else if (inputType == CV_32F) {
checkCudaErrors(hipMemcpy(pI0[0], (float *)im0pad.ptr(), dataSize32f, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(pI1[0], (float *)im1pad.ptr(), dataSize32f, hipMemcpyHostToDevice));
}
else {
checkCudaErrors(hipMemcpy(d_i08uc3, (uchar3 *)im0pad.ptr(), dataSize8uc3, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_i18uc3, (uchar3 *)im1pad.ptr(), dataSize8uc3, hipMemcpyHostToDevice));
// Convert to 32F
Cv8uc3ToGray(d_i08uc3, pI0[0], width, height, stride);
Cv8uc3ToGray(d_i18uc3, pI1[0], width, height, stride);
}
return 0;
}
int Stereo::solveStereoForward() {
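// Coarse-to-fine fisheye stereo: estimate the scalar disparity d_w along the translation
// vector field, refining it at each pyramid level with warping iterations and a TV-L1 solver.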
// Warp i1 using vector fields
WarpImage(pI1[0], width, height, stride, d_cvx, d_cvy, d_i1calibrated);
Swap(pI1[0], d_i1calibrated);
checkCudaErrors(hipMemset(d_w, 0, dataSize));
checkCudaErrors(hipMemset(d_u, 0, dataSize));
checkCudaErrors(hipMemset(d_v, 0, dataSize));
// Construct pyramid
for (int level = 1; level < nLevels; level++) {
Downscale(pI0[level - 1],
pW[level - 1], pH[level - 1], pS[level - 1],
pW[level], pH[level], pS[level],
pI0[level]);
Downscale(pI1[level - 1],
pW[level - 1], pH[level - 1], pS[level - 1],
pW[level], pH[level], pS[level],
pI1[level]);
}
//planeSweepForward();
ComputeOpticalFlowVector(d_w, d_tvxForward, d_tvyForward, pW[0], pH[0], pS[0], d_u, d_v);
/*cv::Mat calibrated = cv::Mat(height, stride, CV_32F);
checkCudaErrors(hipMemcpy((float *)calibrated.ptr(), ps_disparity, width * height * sizeof(float), hipMemcpyDeviceToHost));
cv::imshow("calibrated", calibrated/(float)planeSweepMaxDisparity);*/
// Solve stereo
for (int level = nLevels - 1; level >= 0; level--) {
for (int warpIter = 0; warpIter < nWarpIters; warpIter++) {
// Compute U,V from W; d_w is the disparity magnitude along the vector field (d_tvx, d_tvy)
// Warp using U,V
//std::cout << "entered" << std::endl;
checkCudaErrors(hipMemset(d_du, 0, dataSize));
checkCudaErrors(hipMemset(d_dv, 0, dataSize));
checkCudaErrors(hipMemset(d_dw, 0, dataSize));
checkCudaErrors(hipMemset(d_dws, 0, dataSize));
checkCudaErrors(hipMemset(d_dwmed, 0, dataSize));
checkCudaErrors(hipMemset(d_dwmeds, 0, dataSize));
checkCudaErrors(hipMemset(d_pw1, 0, dataSize));
checkCudaErrors(hipMemset(d_pw2, 0, dataSize));
FindWarpingVector(d_u, d_v, pTvxForward[level], pTvyForward[level], pW[level], pH[level], pS[level], d_tvx2, d_tvy2);
WarpImage(pI1[level], pW[level], pH[level], pS[level], d_u, d_v, d_i1warp);
//std::cout << pW[level] << " " << pH[level] << " " << pS[level] << std::endl;
ComputeDerivativesFisheye(pI0[level], d_i1warp, pTvxForward[level], pTvyForward[level], pW[level], pH[level], pS[level], d_Iw, d_Iz);
/*if (level == 0) {
cv::Mat calibrated = cv::Mat(pH[level], pS[level], CV_32F);
checkCudaErrors(hipMemcpy((float *)calibrated.ptr(), d_i1warp, pS[level] * pH[level] * sizeof(float), hipMemcpyDeviceToHost));
cv::imshow("gradient", calibrated);
}*/
// Inner iteration
for (int iter = 0; iter < nSolverIters; ++iter)
{
SolveDataL1Stereo(d_dwmed,
d_pw1, d_pw2,
d_Iw, d_Iz,
pW[level], pH[level], pS[level],
lambda, theta,
d_dwmeds); //du1 = duhat output
Swap(d_dwmed, d_dwmeds);
SolveSmoothDualTVGlobalStereo(d_dwmed,
d_pw1, d_pw2,
pW[level], pH[level], pS[level],
tau, theta,
d_pw1s, d_pw2s);
Swap(d_pw1, d_pw1s);
Swap(d_pw2, d_pw2s);
}
// Sanity Check: Limit disparity to 1
LimitRange(d_dwmed, 1.0f, pW[level], pH[level], pS[level], d_dwmeds);
Swap(d_dwmed, d_dwmeds);
//// One median filtering
MedianFilterDisparity(d_dwmed, pW[level], pH[level], pS[level],
d_dwmeds, 5);
Swap(d_dwmed, d_dwmeds);
//// Calculate d_du, d_dv
ComputeOpticalFlowVector(d_dwmed, d_tvx2, d_tvy2, pW[level], pH[level], pS[level], d_du, d_dv);
//// update w, u, v
Add(d_w, d_dwmed, pH[level] * pS[level], d_w);
Add(d_u, d_du, pH[level] * pS[level], d_u);
Add(d_v, d_dv, pH[level] * pS[level], d_v);
}
// Upscale
if (level > 0)
{
float scale = fScale;
Upscale(d_u, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_us);
Upscale(d_v, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_vs);
Upscale(d_w, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_ws);
Swap(d_u, d_us);
Swap(d_v, d_vs);
Swap(d_w, d_ws);
}
}
Clone(d_w, width, height, stride, d_wForward);
if (visualizeResults) {
FlowToHSV(d_u, d_v, width, height, stride, d_uvrgb, flowScale);
}
return 0;
}
int Stereo::solveStereoBackward() {
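// Same pipeline as solveStereoForward, but with the image pair swapped and the negated
// (backward) translation vector field; the result d_wBackward feeds the occlusion check.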
// Warp i1 using vector fields
//WarpImage(pI1[0], width, height, stride, d_cvx, d_cvy, d_i1calibrated);
//Swap(pI1[0], d_i1calibrated);
Swap(pI0[0], pI1[0]);
checkCudaErrors(hipMemset(d_w, 0, dataSize));
checkCudaErrors(hipMemset(d_u, 0, dataSize));
checkCudaErrors(hipMemset(d_v, 0, dataSize));
// Construct pyramid
for (int level = 1; level < nLevels; level++) {
Swap(pI0[level], pI1[level]);
}
//planeSweepBackward();
//Clone(ps_disparity, pW[planeSweepLevel], pH[planeSweepLevel], pS[planeSweepLevel], d_w);
ComputeOpticalFlowVector(d_w, d_tvxBackward, d_tvyBackward, pW[0], pH[0], pS[0], d_u, d_v);
/*cv::Mat calibrated = cv::Mat(height, stride, CV_32F);
checkCudaErrors(hipMemcpy((float *)calibrated.ptr(), ps_disparity, width * height * sizeof(float), hipMemcpyDeviceToHost));
cv::imshow("calibrated", calibrated/(float)planeSweepMaxDisparity);*/
// Solve stereo
for (int level = nLevels - 1; level >= 0; level--) {
for (int warpIter = 0; warpIter < nWarpIters; warpIter++) {
// Compute U,V from W; d_w is the disparity magnitude along the vector field (d_tvx, d_tvy)
// Warp using U,V
//std::cout << "entered" << std::endl;
checkCudaErrors(hipMemset(d_du, 0, dataSize));
checkCudaErrors(hipMemset(d_dv, 0, dataSize));
checkCudaErrors(hipMemset(d_dw, 0, dataSize));
checkCudaErrors(hipMemset(d_dws, 0, dataSize));
checkCudaErrors(hipMemset(d_dwmed, 0, dataSize));
checkCudaErrors(hipMemset(d_dwmeds, 0, dataSize));
checkCudaErrors(hipMemset(d_pw1, 0, dataSize));
checkCudaErrors(hipMemset(d_pw2, 0, dataSize));
FindWarpingVector(d_u, d_v, pTvxBackward[level], pTvyBackward[level],
pW[level], pH[level], pS[level], d_tvx2, d_tvy2);
WarpImage(pI1[level], pW[level], pH[level], pS[level], d_u, d_v, d_i1warp);
//std::cout << pW[level] << " " << pH[level] << " " << pS[level] << std::endl;
ComputeDerivativesFisheye(pI0[level], d_i1warp, pTvxBackward[level], pTvyBackward[level],
pW[level], pH[level], pS[level], d_Iw, d_Iz);
/*if (level == 0) {
cv::Mat calibrated = cv::Mat(pH[level], pS[level], CV_32F);
checkCudaErrors(hipMemcpy((float *)calibrated.ptr(), d_i1warp, pS[level] * pH[level] * sizeof(float), hipMemcpyDeviceToHost));
cv::imshow("gradient", calibrated);
}*/
// Inner iteration
for (int iter = 0; iter < nSolverIters; ++iter)
{
SolveDataL1Stereo(d_dwmed,
d_pw1, d_pw2,
d_Iw, d_Iz,
pW[level], pH[level], pS[level],
lambda, theta,
d_dwmeds); //du1 = duhat output
Swap(d_dwmed, d_dwmeds);
SolveSmoothDualTVGlobalStereo(d_dwmed,
d_pw1, d_pw2,
pW[level], pH[level], pS[level],
tau, theta,
d_pw1s, d_pw2s);
Swap(d_pw1, d_pw1s);
Swap(d_pw2, d_pw2s);
}
// Sanity Check: Limit disparity to 1
LimitRange(d_dwmed, 1.0f, pW[level], pH[level], pS[level], d_dwmeds);
Swap(d_dwmed, d_dwmeds);
//// One median filtering
MedianFilterDisparity(d_dwmed, pW[level], pH[level], pS[level],
d_dwmeds, 5);
Swap(d_dwmed, d_dwmeds);
//// Calculate d_du, d_dv
ComputeOpticalFlowVector(d_dwmed, d_tvx2, d_tvy2, pW[level], pH[level], pS[level], d_du, d_dv);
//// update w, u, v
Add(d_w, d_dwmed, pH[level] * pS[level], d_w);
Add(d_u, d_du, pH[level] * pS[level], d_u);
Add(d_v, d_dv, pH[level] * pS[level], d_v);
}
// Upscale
if (level > 0)
{
float scale = fScale;
Upscale(d_u, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_us);
Upscale(d_v, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_vs);
Upscale(d_w, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_ws);
Swap(d_u, d_us);
Swap(d_v, d_vs);
Swap(d_w, d_ws);
}
}
Clone(d_w, width, height, stride, d_wBackward);
if (visualizeResults) {
FlowToHSV(d_u, d_v, width, height, stride, d_uvrgb, flowScale);
}
return 0;
}
int Stereo::occlusionCheck(float threshold) {
isOcclusionChecked = true;
// Get wFinal
OcclusionCheck(d_wForward, d_wBackward, threshold, d_uForward, d_vForward, width, height, stride, d_wFinal);
return 0;
}
int Stereo::planeSweepForward() {
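// Plane sweep: repeatedly warp I1 along the forward translation field in steps of
// planeSweepStride, correlating each candidate disparity against I0 over a window while
// updating the per-pixel error/disparity buffers ps_error/ps_disparity.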
// Plane sweep at pyramid level 0 (full resolution)
int planeSweepLevel = 0;
checkCudaErrors(hipMemset(ps_error, 0, dataSize));
checkCudaErrors(hipMemset(ps_depth, 0, dataSize));
checkCudaErrors(hipMemset(ps_disparity, 0, dataSize));
Clone(pI1[planeSweepLevel], pW[planeSweepLevel], pH[planeSweepLevel], pS[planeSweepLevel], ps_i1warp);
SetValue(ps_error, planeSweepMaxError, pW[planeSweepLevel], pH[planeSweepLevel], pS[planeSweepLevel]);
for (int sweep = 0; sweep < planeSweepMaxDisparity; sweep += planeSweepStride) {
PlaneSweepCorrelation(ps_i1warp, pI0[planeSweepLevel], ps_disparity, sweep, planeSweepWindow,
pW[planeSweepLevel], pH[planeSweepLevel], pS[planeSweepLevel], ps_error);
for (int psStride = 0; psStride < planeSweepStride; psStride++) {
WarpImage(ps_i1warp, pW[planeSweepLevel], pH[planeSweepLevel], pS[planeSweepLevel], d_tvxForward, d_tvyForward, ps_i1warps);
Swap(ps_i1warp, ps_i1warps);
}
}
//Clone(ps_disparity, pW[planeSweepLevel], pH[planeSweepLevel], pS[planeSweepLevel], d_w);
return 0;
}
int Stereo::planeSweepBackward() {
// Plane sweep at pyramid level 0 (full resolution)
int planeSweepLevel = 0;
checkCudaErrors(hipMemset(ps_error, 0, dataSize));
checkCudaErrors(hipMemset(ps_depth, 0, dataSize));
checkCudaErrors(hipMemset(ps_disparity, 0, dataSize));
Clone(pI1[planeSweepLevel], pW[planeSweepLevel], pH[planeSweepLevel], pS[planeSweepLevel], ps_i1warp);
SetValue(ps_error, planeSweepMaxError, pW[planeSweepLevel], pH[planeSweepLevel], pS[planeSweepLevel]);
for (int sweep = 0; sweep < planeSweepMaxDisparity; sweep += planeSweepStride) {
PlaneSweepCorrelation(ps_i1warp, pI0[planeSweepLevel], ps_disparity, sweep, planeSweepWindow,
pW[planeSweepLevel], pH[planeSweepLevel], pS[planeSweepLevel], ps_error);
for (int psStride = 0; psStride < planeSweepStride; psStride++) {
WarpImage(ps_i1warp, pW[planeSweepLevel], pH[planeSweepLevel], pS[planeSweepLevel],
d_tvxBackward, d_tvyBackward, ps_i1warps);
Swap(ps_i1warp, ps_i1warps);
}
}
return 0;
}
int Stereo::planeSweepOcclusionCheck() {
isPlaneSweepOcclusionChecked = true;
// Get wFinal
return 0;
}
int Stereo::copyStereoToHost(cv::Mat &wCropped) {
// Convert Disparity to Depth
if (isOcclusionChecked) {
ConvertDisparityToDepth(d_wFinal, baseline, focal, width, height, stride, d_depth);
}
else {
ConvertDisparityToDepth(d_w, baseline, focal, width, height, stride, d_depth);
}
// Remove Padding
//checkCudaErrors(hipMemcpy((float *)depth.ptr(), d_w, stride * height * sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy((float *)depth.ptr(), d_depth, stride * height * sizeof(float), hipMemcpyDeviceToHost));
cv::Rect roi(0, 0, width, height); // define roi here as x0, y0, width, height
wCropped = depth(roi);
return 0;
}
int Stereo::copyPlaneSweepToHost(cv::Mat &ps) {
// Convert Disparity to Depth
if (isPlaneSweepOcclusionChecked) {
ConvertDisparityToDepth(ps_disparityFinal, baseline, focal, width, height, stride, ps_depth);
}
else {
ConvertDisparityToDepth(ps_disparity, baseline, focal, width, height, stride, ps_depth);
}
// Remove Padding
//checkCudaErrors(hipMemcpy((float *)depth.ptr(), d_w, stride * height * sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy((float *)planeSweepDepth.ptr(), ps_depth, stride * height * sizeof(float), hipMemcpyDeviceToHost));
cv::Rect roi(0, 0, width, height); // define roi here as x0, y0, width, height
ps = planeSweepDepth(roi);
return 0;
}
int Stereo::solveOpticalFlow() {
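// Coarse-to-fine TV-L1 optical flow: per level, warp I1 by the current flow estimate,
// then alternate the L1 data-term update with the dual TV smoothing step and apply a
// median filter to the flow increment before updating (u, v).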
// construct pyramid
for (int level = 1; level < nLevels; level++) {
Downscale(pI0[level - 1],
pW[level - 1], pH[level - 1], pS[level - 1],
pW[level], pH[level], pS[level],
pI0[level]);
Downscale(pI1[level - 1],
pW[level - 1], pH[level - 1], pS[level - 1],
pW[level], pH[level], pS[level],
pI1[level]);
}
// solve flow
checkCudaErrors(hipMemset(d_u, 0, dataSize));
checkCudaErrors(hipMemset(d_v, 0, dataSize));
for (int level = nLevels - 1; level >= 0; level--) {
for (int warpIter = 0; warpIter < nWarpIters; warpIter++) {
//std::cout << level << std::endl;
//initialize zeros
checkCudaErrors(hipMemset(d_du, 0, dataSize));
checkCudaErrors(hipMemset(d_dv, 0, dataSize));
checkCudaErrors(hipMemset(d_dus, 0, dataSize));
checkCudaErrors(hipMemset(d_dvs, 0, dataSize));
checkCudaErrors(hipMemset(d_dumed, 0, dataSize));
checkCudaErrors(hipMemset(d_dvmed, 0, dataSize));
checkCudaErrors(hipMemset(d_dumeds, 0, dataSize));
checkCudaErrors(hipMemset(d_dvmeds, 0, dataSize));
checkCudaErrors(hipMemset(d_pu1, 0, dataSize));
checkCudaErrors(hipMemset(d_pu2, 0, dataSize));
checkCudaErrors(hipMemset(d_pv1, 0, dataSize));
checkCudaErrors(hipMemset(d_pv2, 0, dataSize));
//warp frame 1
WarpImage(pI1[level], pW[level], pH[level], pS[level], d_u, d_v, d_i1warp);
ComputeDerivatives(pI0[level], d_i1warp, pW[level], pH[level], pS[level], d_Ix, d_Iy, d_Iz);
//inner iteration
for (int iter = 0; iter < nSolverIters; ++iter)
{
SolveDataL1(d_dumed, d_dvmed,
d_pu1, d_pu2,
d_pv1, d_pv2,
d_Ix, d_Iy, d_Iz,
pW[level], pH[level], pS[level],
lambda, theta,
d_dumeds, d_dvmeds); //du1 = duhat output
Swap(d_dumed, d_dumeds);
Swap(d_dvmed, d_dvmeds);
SolveSmoothDualTVGlobal(d_dumed, d_dvmed,
d_pu1, d_pu2, d_pv1, d_pv2,
pW[level], pH[level], pS[level],
tau, theta,
d_pu1s, d_pu2s, d_pv1s, d_pv2s);
Swap(d_pu1, d_pu1s);
Swap(d_pu2, d_pu2s);
Swap(d_pv1, d_pv1s);
Swap(d_pv2, d_pv2s);
//***********************************
/*MedianFilter(d_dumed, d_dvmed, pW[level], pH[level], pS[level],
d_dumeds, d_dvmeds, 5);
Swap(d_dumed, d_dumeds);
Swap(d_dvmed, d_dvmeds);*/
}
// one median filtering
MedianFilter(d_dumed, d_dvmed, pW[level], pH[level], pS[level],
d_dumeds, d_dvmeds, 5);
Swap(d_dumed, d_dumeds);
Swap(d_dvmed, d_dvmeds);
// update u, v
Add(d_u, d_dumed, pH[level] * pS[level], d_u);
Add(d_v, d_dvmed, pH[level] * pS[level], d_v);
/*
MedianFilter(d_u, d_v, pW[level], pH[level], pS[level],
d_dumeds, d_dvmeds, 5);
Swap(d_u, d_dumeds);
Swap(d_v, d_dvmeds);*/
}
//upscale
if (level > 0)
{
// scale uv
//float scale = (float)pW[level + 1] / (float)pW[level];
float scale = fScale;
Upscale(d_u, pW[level], pH[level], pS[level],
pW[level - 1], pH[level - 1], pS[level - 1], scale, d_us);
//float scaleY = (float)pH[level + 1] / (float)pH[level];
Upscale(d_v, pW[level], pH[level], pS[level],
pW[level - 1], pH[level - 1], pS[level - 1], scale, d_vs);
Swap(d_u, d_us);
Swap(d_v, d_vs);
}
}
if (visualizeResults) {
FlowToHSV(d_u, d_v, width, height, stride, d_uvrgb, flowScale);
}
//FlowToHSV(d_u, d_v, width, height, stride, d_uvrgb, flowScale);
//SolveSceneFlow(d_u, d_v, d_depth016u, d_depth116u, width, height, stride, d_sceneflow);
//std::cout << stride << " " << height << " " << height << " " << inputChannels << std::endl;
return 0;
}
int Stereo::copyOpticalFlowVisToHost(cv::Mat &uvrgbCropped) {
// Remove Padding
checkCudaErrors(hipMemcpy((float3 *)uvrgb.ptr(), d_uvrgb, width * height * sizeof(float) * 3, hipMemcpyDeviceToHost));
cv::Rect roi(0, 0, width, height); // define roi here as x0, y0, width, height
uvrgbCropped = uvrgb(roi);
return 0;
}
int Stereo::copyOpticalFlowToHost(cv::Mat &u, cv::Mat &v) {
checkCudaErrors(hipMemcpy((float *)upad.ptr(), d_u, stride * height * sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy((float *)vpad.ptr(), d_v, stride * height * sizeof(float), hipMemcpyDeviceToHost));
// Remove Padding
cv::Rect roi(0, 0, width, height); // define roi here as x0, y0, width, height
u = upad(roi);
v = vpad(roi);
return 0;
}
// Align up n to the nearest multiple of m
inline int Stereo::iAlignUp(int n)
{
int m = this->StrideAlignment;
int mod = n % m;
if (mod)
return n + m - mod;
else
return n;
}
int Stereo::iDivUp(int n, int m)
{
return (n + m - 1) / m;
}
// swap two values
template<typename T>
inline void Stereo::Swap(T &a, T &ax)
{
T t = a;
a = ax;
ax = t;
}
//swap four values
template<typename T>
inline void Stereo::Swap(T &a, T &ax, T &b, T &bx)
{
Swap(a, ax);
Swap(b, bx);
}
//swap eight values
template<typename T>
inline void Stereo::Swap(T &a, T &ax, T &b, T &bx, T &c, T &cx, T &d, T &dx)
{
Swap(a, ax);
Swap(b, bx);
Swap(c, cx);
Swap(d, dx);
}
int Stereo::computePyramidLevels(int width, int height, int minWidth, float scale) {
int nLevels = 1;
int pHeight = (int)((float)height / scale);
while (pHeight > minWidth) {
nLevels++;
pHeight = (int)((float)pHeight / scale);
}
std::cout << "Pyramid Levels: " << nLevels << std::endl;
return nLevels;
}
int Stereo::initializeColorWheel() {
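// 55-entry RGB color wheel used for flow visualization; the host table is uploaded to d_colorwheel below.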
checkCudaErrors(hipMalloc(&d_colorwheel, 55 * 3 * sizeof(float)));
float colorwheel[165] = { 255, 0, 0,
255, 17, 0,
255, 34, 0,
255, 51, 0,
255, 68, 0,
255, 85, 0,
255, 102, 0,
255, 119, 0,
255, 136, 0,
255, 153, 0,
255, 170, 0,
255, 187, 0,
255, 204, 0,
255, 221, 0,
255, 238, 0,
255, 255, 0,
213, 255, 0,
170, 255, 0,
128, 255, 0,
85, 255, 0,
43, 255, 0,
0, 255, 0,
0, 255, 63,
0, 255, 127,
0, 255, 191,
0, 255, 255,
0, 232, 255,
0, 209, 255,
0, 186, 255,
0, 163, 255,
0, 140, 255,
0, 116, 255,
0, 93, 255,
0, 70, 255,
0, 47, 255,
0, 24, 255,
0, 0, 255,
19, 0, 255,
39, 0, 255,
58, 0, 255,
78, 0, 255,
98, 0, 255,
117, 0, 255,
137, 0, 255,
156, 0, 255,
176, 0, 255,
196, 0, 255,
215, 0, 255,
235, 0, 255,
255, 0, 255,
255, 0, 213,
255, 0, 170,
255, 0, 128,
255, 0, 85,
255, 0, 43 };
// Upload the host color wheel table to the device buffer
checkCudaErrors(hipMemcpy(d_colorwheel, colorwheel, 55 * 3 * sizeof(float), hipMemcpyHostToDevice));
return 0;
} | 158bb34fcddb9509615fbba7c08940cd3e7ab99b.cu | #include "stereo.h"
Stereo::Stereo() {
this->BlockWidth = 32;
this->BlockHeight = 12;
this->StrideAlignment = 32;
}
Stereo::Stereo(int BlockWidth, int BlockHeight, int StrideAlignment) {
this->BlockWidth = BlockWidth;
this->BlockHeight = BlockHeight;
this->StrideAlignment = StrideAlignment;
}
int Stereo::initializeOpticalFlow(int width, int height, int channels, int inputType, int nLevels, float scale, float lambda,
float theta, float tau, int nWarpIters, int nSolverIters)
{
//allocate all memories
this->width = width;
this->height = height;
this->stride = iAlignUp(width);
this->inputType = inputType;
this->fScale = scale;
this->nLevels = nLevels;
this->inputChannels = channels;
this->nSolverIters = nSolverIters; // number of inner iterations (ROF loop)
this->nWarpIters = nWarpIters;
this->lambda = lambda;
this->theta = theta;
this->tau = tau;
pI0 = std::vector<float*>(nLevels);
pI1 = std::vector<float*>(nLevels);
pW = std::vector<int>(nLevels);
pH = std::vector<int>(nLevels);
pS = std::vector<int>(nLevels);
pDataSize = std::vector<int>(nLevels);
int newHeight = height;
int newWidth = width;
int newStride = iAlignUp(width);
//std::cout << "Pyramid Sizes: " << newWidth << " " << newHeight << " " << newStride << std::endl;
for (int level = 0; level < nLevels; level++) {
pDataSize[level] = newStride * newHeight * sizeof(float);
checkCudaErrors(cudaMalloc(&pI0[level], pDataSize[level]));
checkCudaErrors(cudaMalloc(&pI1[level], pDataSize[level]));
pW[level] = newWidth;
pH[level] = newHeight;
pS[level] = newStride;
newHeight = newHeight / fScale;
newWidth = newWidth / fScale;
newStride = iAlignUp(newWidth);
}
//runtime
dataSize = stride * height * sizeof(float);
dataSize8uc3 = stride * height * sizeof(uchar3);
dataSize8u = stride * height * sizeof(uchar);
dataSize32f = dataSize;
dataSize32fc3 = dataSize * 3;
checkCudaErrors(cudaMalloc(&d_i1warp, dataSize));
checkCudaErrors(cudaMalloc(&d_du, dataSize));
checkCudaErrors(cudaMalloc(&d_dv, dataSize));
checkCudaErrors(cudaMalloc(&d_dus, dataSize));
checkCudaErrors(cudaMalloc(&d_dvs, dataSize));
checkCudaErrors(cudaMalloc(&d_dumed, dataSize));
checkCudaErrors(cudaMalloc(&d_dvmed, dataSize));
checkCudaErrors(cudaMalloc(&d_dumeds, dataSize));
checkCudaErrors(cudaMalloc(&d_dvmeds, dataSize));
//dual TV
checkCudaErrors(cudaMalloc(&d_pu1, dataSize));
checkCudaErrors(cudaMalloc(&d_pu2, dataSize));
checkCudaErrors(cudaMalloc(&d_pv1, dataSize));
checkCudaErrors(cudaMalloc(&d_pv2, dataSize));
//dual TV temps
checkCudaErrors(cudaMalloc(&d_pu1s, dataSize));
checkCudaErrors(cudaMalloc(&d_pu2s, dataSize));
checkCudaErrors(cudaMalloc(&d_pv1s, dataSize));
checkCudaErrors(cudaMalloc(&d_pv2s, dataSize));
checkCudaErrors(cudaMalloc(&d_Ix, dataSize));
checkCudaErrors(cudaMalloc(&d_Iy, dataSize));
checkCudaErrors(cudaMalloc(&d_Iz, dataSize));
checkCudaErrors(cudaMalloc(&d_u, dataSize));
checkCudaErrors(cudaMalloc(&d_v, dataSize));
checkCudaErrors(cudaMalloc(&d_us, dataSize));
checkCudaErrors(cudaMalloc(&d_vs, dataSize));
if (inputType == CV_8UC3) {
checkCudaErrors(cudaMalloc(&d_i08uc3, dataSize8uc3));
checkCudaErrors(cudaMalloc(&d_i18uc3, dataSize8uc3));
}
else if (inputType == CV_8U) {
checkCudaErrors(cudaMalloc(&d_i08u, dataSize8u));
checkCudaErrors(cudaMalloc(&d_i18u, dataSize8u));
}
// colored uv, for display only
checkCudaErrors(cudaMalloc(&d_uvrgb, dataSize * 3));
// Output mats
uvrgb = cv::Mat(height, stride, CV_32FC3);
upad = cv::Mat(height, stride, CV_32F);
vpad = cv::Mat(height, stride, CV_32F);
return 0;
}
int Stereo::initializeFisheyeStereo(int width, int height, int channels, int inputType, int nLevels, float scale, float lambda,
float theta, float tau, int nWarpIters, int nSolverIters) {
//allocate all memories
this->width = width;
this->height = height;
this->stride = iAlignUp(width);
this->inputType = inputType;
this->fScale = scale;
this->nLevels = nLevels;
this->inputChannels = channels;
this->nSolverIters = nSolverIters; // number of inner iterations (ROF loop)
this->nWarpIters = nWarpIters;
this->lambda = lambda;
this->theta = theta;
this->tau = tau;
pI0 = std::vector<float*>(nLevels);
pI1 = std::vector<float*>(nLevels);
pW = std::vector<int>(nLevels);
pH = std::vector<int>(nLevels);
pS = std::vector<int>(nLevels);
pDataSize = std::vector<int>(nLevels);
pTvxForward = std::vector<float*>(nLevels);
pTvyForward = std::vector<float*>(nLevels);
pTvxBackward = std::vector<float*>(nLevels);
pTvyBackward = std::vector<float*>(nLevels);
int newHeight = height;
int newWidth = width;
int newStride = iAlignUp(width);
//std::cout << "Pyramid Sizes: " << newWidth << " " << newHeight << " " << newStride << std::endl;
for (int level = 0; level < nLevels; level++) {
pDataSize[level] = newStride * newHeight * sizeof(float);
checkCudaErrors(cudaMalloc(&pI0[level], pDataSize[level]));
checkCudaErrors(cudaMalloc(&pI1[level], pDataSize[level]));
checkCudaErrors(cudaMalloc(&pTvxForward[level], pDataSize[level]));
checkCudaErrors(cudaMalloc(&pTvyForward[level], pDataSize[level]));
checkCudaErrors(cudaMalloc(&pTvxBackward[level], pDataSize[level]));
checkCudaErrors(cudaMalloc(&pTvyBackward[level], pDataSize[level]));
pW[level] = newWidth;
pH[level] = newHeight;
pS[level] = newStride;
newHeight = newHeight / fScale;
newWidth = newWidth / fScale;
newStride = iAlignUp(newWidth);
}
//runtime
dataSize = stride * height * sizeof(float);
dataSize8uc3 = stride * height * sizeof(uchar3);
dataSize8u = stride * height * sizeof(uchar);
dataSize32f = dataSize;
dataSize32fc3 = dataSize * 3;
checkCudaErrors(cudaMalloc(&d_i1warp, dataSize));
checkCudaErrors(cudaMalloc(&d_tvxForward, dataSize));
checkCudaErrors(cudaMalloc(&d_tvyForward, dataSize));
checkCudaErrors(cudaMalloc(&d_tvxBackward, dataSize));
checkCudaErrors(cudaMalloc(&d_tvyBackward, dataSize));
checkCudaErrors(cudaMalloc(&d_tvx2, dataSize));
checkCudaErrors(cudaMalloc(&d_tvy2, dataSize));
checkCudaErrors(cudaMalloc(&d_cvx, dataSize));
checkCudaErrors(cudaMalloc(&d_cvy, dataSize));
checkCudaErrors(cudaMalloc(&d_i1calibrated, dataSize));
checkCudaErrors(cudaMalloc(&d_Iw, dataSize));
checkCudaErrors(cudaMalloc(&d_Iz, dataSize));
checkCudaErrors(cudaMalloc(&d_w, dataSize));
checkCudaErrors(cudaMalloc(&d_wForward, dataSize));
checkCudaErrors(cudaMalloc(&d_wBackward, dataSize));
checkCudaErrors(cudaMalloc(&d_wFinal, dataSize));
checkCudaErrors(cudaMalloc(&d_u, dataSize));
checkCudaErrors(cudaMalloc(&d_v, dataSize));
checkCudaErrors(cudaMalloc(&d_uForward, dataSize));
checkCudaErrors(cudaMalloc(&d_vForward, dataSize));
checkCudaErrors(cudaMalloc(&d_us, dataSize));
checkCudaErrors(cudaMalloc(&d_vs, dataSize));
checkCudaErrors(cudaMalloc(&d_ws, dataSize));
checkCudaErrors(cudaMalloc(&d_du, dataSize));
checkCudaErrors(cudaMalloc(&d_dv, dataSize));
checkCudaErrors(cudaMalloc(&d_dw, dataSize));
checkCudaErrors(cudaMalloc(&d_dws, dataSize));
checkCudaErrors(cudaMalloc(&d_depth, dataSize));
checkCudaErrors(cudaMalloc(&d_depthFinal, dataSize));
checkCudaErrors(cudaMalloc(&d_occlusion, dataSize));
checkCudaErrors(cudaMalloc(&d_dwmed, dataSize));
checkCudaErrors(cudaMalloc(&d_dwmeds, dataSize));
checkCudaErrors(cudaMalloc(&d_pw1, dataSize));
checkCudaErrors(cudaMalloc(&d_pw2, dataSize));
checkCudaErrors(cudaMalloc(&d_pw1s, dataSize));
checkCudaErrors(cudaMalloc(&d_pw2s, dataSize));
if (inputType == CV_8UC3) {
checkCudaErrors(cudaMalloc(&d_i08uc3, dataSize8uc3));
checkCudaErrors(cudaMalloc(&d_i18uc3, dataSize8uc3));
}
else if (inputType == CV_8U) {
checkCudaErrors(cudaMalloc(&d_i08u, dataSize8u));
checkCudaErrors(cudaMalloc(&d_i18u, dataSize8u));
}
// Plane sweep
checkCudaErrors(cudaMalloc(&ps_i1warp, dataSize));
checkCudaErrors(cudaMalloc(&ps_i1warps, dataSize));
checkCudaErrors(cudaMalloc(&ps_error, dataSize));
checkCudaErrors(cudaMalloc(&ps_depth, dataSize));
checkCudaErrors(cudaMalloc(&ps_disparity, dataSize));
checkCudaErrors(cudaMalloc(&ps_disparityForward, dataSize));
checkCudaErrors(cudaMalloc(&ps_disparityBackward, dataSize));
checkCudaErrors(cudaMalloc(&ps_disparityFinal, dataSize));
// Colored uv, for display only
checkCudaErrors(cudaMalloc(&d_uvrgb, dataSize * 3));
uvrgb = cv::Mat(height, stride, CV_32FC3);
disparity = cv::Mat(height, stride, CV_32F);
depth = cv::Mat(height, stride, CV_32F);
planeSweepDepth = cv::Mat(height, stride, CV_32F);
return 0;
}
int Stereo::loadVectorFields(cv::Mat translationVector, cv::Mat calibrationVector) {
// Padding
cv::Mat translationVectorPad = cv::Mat(height, stride, CV_32F);
cv::Mat calibrationVectorPad = cv::Mat(height, stride, CV_32F);
cv::copyMakeBorder(translationVector, translationVectorPad, 0, 0, 0, stride - width, cv::BORDER_CONSTANT, 0);
cv::copyMakeBorder(calibrationVector, calibrationVectorPad, 0, 0, 0, stride - width, cv::BORDER_CONSTANT, 0);
// Translation Vector Field
translationVectorX = cv::Mat(height, stride, CV_32F);
translationVectorY = cv::Mat(height, stride, CV_32F);
calibrationVectorX = cv::Mat(height, stride, CV_32F);
calibrationVectorY = cv::Mat(height, stride, CV_32F);
cv::Mat tuv[2];
cv::split(translationVectorPad, tuv);
translationVectorX = tuv[0];
translationVectorY = tuv[1];
checkCudaErrors(cudaMemcpy(d_tvxForward, (float *)translationVectorX.ptr(), dataSize32f, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_tvyForward, (float *)translationVectorY.ptr(), dataSize32f, cudaMemcpyHostToDevice));
pTvxForward[0] = d_tvxForward;
pTvyForward[0] = d_tvyForward;
ScalarMultiply(d_tvxForward, -1.0f, width, height, stride, d_tvxBackward);
ScalarMultiply(d_tvyForward, -1.0f, width, height, stride, d_tvyBackward);
pTvxBackward[0] = d_tvxBackward;
pTvyBackward[0] = d_tvyBackward;
for (int level = 1; level < nLevels; level++) {
//std::cout << pW[level] << " " << pH[level] << " " << pS[level] << std::endl;
Downscale(pTvxForward[level - 1], pW[level - 1], pH[level - 1], pS[level - 1],
pW[level], pH[level], pS[level], pTvxForward[level]);
Downscale(pTvyForward[level - 1], pW[level - 1], pH[level - 1], pS[level - 1],
pW[level], pH[level], pS[level], pTvyForward[level]);
Downscale(pTvxBackward[level - 1], pW[level - 1], pH[level - 1], pS[level - 1],
pW[level], pH[level], pS[level], pTvxBackward[level]);
Downscale(pTvyBackward[level - 1], pW[level - 1], pH[level - 1], pS[level - 1],
pW[level], pH[level], pS[level], pTvyBackward[level]);
}
// Calibration Vector Field
cv::Mat cuv[2];
cv::split(calibrationVectorPad, cuv);
calibrationVectorX = cuv[0].clone();
calibrationVectorY = cuv[1].clone();
checkCudaErrors(cudaMemcpy(d_cvx, (float *)calibrationVectorX.ptr(), dataSize32f, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_cvy, (float *)calibrationVectorY.ptr(), dataSize32f, cudaMemcpyHostToDevice));
return 0;
}
int Stereo::copyImagesToDevice(cv::Mat i0, cv::Mat i1) {
// Padding
cv::copyMakeBorder(i0, im0pad, 0, 0, 0, stride - width, cv::BORDER_CONSTANT, 0);
cv::copyMakeBorder(i1, im1pad, 0, 0, 0, stride - width, cv::BORDER_CONSTANT, 0);
if (inputType == CV_8U) {
checkCudaErrors(cudaMemcpy(d_i08u, (uchar *)im0pad.ptr(), dataSize8u, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_i18u, (uchar *)im1pad.ptr(), dataSize8u, cudaMemcpyHostToDevice));
// Convert to 32F
Cv8uToGray(d_i08u, pI0[0], width, height, stride);
Cv8uToGray(d_i18u, pI1[0], width, height, stride);
}
else if (inputType == CV_32F) {
checkCudaErrors(cudaMemcpy(pI0[0], (float *)im0pad.ptr(), dataSize32f, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(pI1[0], (float *)im1pad.ptr(), dataSize32f, cudaMemcpyHostToDevice));
}
else {
checkCudaErrors(cudaMemcpy(d_i08uc3, (uchar3 *)im0pad.ptr(), dataSize8uc3, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_i18uc3, (uchar3 *)im1pad.ptr(), dataSize8uc3, cudaMemcpyHostToDevice));
// Convert to 32F
Cv8uc3ToGray(d_i08uc3, pI0[0], width, height, stride);
Cv8uc3ToGray(d_i18uc3, pI1[0], width, height, stride);
}
return 0;
}
int Stereo::solveStereoForward() {
// Warp i1 using vector fields
WarpImage(pI1[0], width, height, stride, d_cvx, d_cvy, d_i1calibrated);
Swap(pI1[0], d_i1calibrated);
checkCudaErrors(cudaMemset(d_w, 0, dataSize));
checkCudaErrors(cudaMemset(d_u, 0, dataSize));
checkCudaErrors(cudaMemset(d_v, 0, dataSize));
// Construct pyramid
for (int level = 1; level < nLevels; level++) {
Downscale(pI0[level - 1],
pW[level - 1], pH[level - 1], pS[level - 1],
pW[level], pH[level], pS[level],
pI0[level]);
Downscale(pI1[level - 1],
pW[level - 1], pH[level - 1], pS[level - 1],
pW[level], pH[level], pS[level],
pI1[level]);
}
//planeSweepForward();
ComputeOpticalFlowVector(d_w, d_tvxForward, d_tvyForward, pW[0], pH[0], pS[0], d_u, d_v);
/*cv::Mat calibrated = cv::Mat(height, stride, CV_32F);
checkCudaErrors(cudaMemcpy((float *)calibrated.ptr(), ps_disparity, width * height * sizeof(float), cudaMemcpyDeviceToHost));
cv::imshow("calibrated", calibrated/(float)planeSweepMaxDisparity);*/
// Solve stereo
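// Coarse-to-fine scheme: start at the coarsest pyramid level, refine the disparity
// increment with the alternating data (L1) / smoothness (dual TV) updates below,
// then upscale u, v, w to the next finer level.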
for (int level = nLevels - 1; level >= 0; level--) {
for (int warpIter = 0; warpIter < nWarpIters; warpIter++) {
// Compute U,V from W (d_w is the disparity magnitude along the vector field d_tvx, d_tvy)
// Warp using U,V
//std::cout << "entered" << std::endl;
checkCudaErrors(cudaMemset(d_du, 0, dataSize));
checkCudaErrors(cudaMemset(d_dv, 0, dataSize));
checkCudaErrors(cudaMemset(d_dw, 0, dataSize));
checkCudaErrors(cudaMemset(d_dws, 0, dataSize));
checkCudaErrors(cudaMemset(d_dwmed, 0, dataSize));
checkCudaErrors(cudaMemset(d_dwmeds, 0, dataSize));
checkCudaErrors(cudaMemset(d_pw1, 0, dataSize));
checkCudaErrors(cudaMemset(d_pw2, 0, dataSize));
FindWarpingVector(d_u, d_v, pTvxForward[level], pTvyForward[level], pW[level], pH[level], pS[level], d_tvx2, d_tvy2);
WarpImage(pI1[level], pW[level], pH[level], pS[level], d_u, d_v, d_i1warp);
//std::cout << pW[level] << " " << pH[level] << " " << pS[level] << std::endl;
ComputeDerivativesFisheye(pI0[level], d_i1warp, pTvxForward[level], pTvyForward[level], pW[level], pH[level], pS[level], d_Iw, d_Iz);
/*if (level == 0) {
cv::Mat calibrated = cv::Mat(pH[level], pS[level], CV_32F);
checkCudaErrors(cudaMemcpy((float *)calibrated.ptr(), d_i1warp, pS[level] * pH[level] * sizeof(float), cudaMemcpyDeviceToHost));
cv::imshow("gradient", calibrated);
}*/
// Inner iteration
for (int iter = 0; iter < nSolverIters; ++iter)
{
SolveDataL1Stereo(d_dwmed,
d_pw1, d_pw2,
d_Iw, d_Iz,
pW[level], pH[level], pS[level],
lambda, theta,
d_dwmeds); //du1 = duhat output
Swap(d_dwmed, d_dwmeds);
SolveSmoothDualTVGlobalStereo(d_dwmed,
d_pw1, d_pw2,
pW[level], pH[level], pS[level],
tau, theta,
d_pw1s, d_pw2s);
Swap(d_pw1, d_pw1s);
Swap(d_pw2, d_pw2s);
}
// Sanity Check: Limit disparity to 1
LimitRange(d_dwmed, 1.0f, pW[level], pH[level], pS[level], d_dwmeds);
Swap(d_dwmed, d_dwmeds);
//// One median filtering
MedianFilterDisparity(d_dwmed, pW[level], pH[level], pS[level],
d_dwmeds, 5);
Swap(d_dwmed, d_dwmeds);
//// Calculate d_du, d_dv
ComputeOpticalFlowVector(d_dwmed, d_tvx2, d_tvy2, pW[level], pH[level], pS[level], d_du, d_dv);
//// update w, u, v
Add(d_w, d_dwmed, pH[level] * pS[level], d_w);
Add(d_u, d_du, pH[level] * pS[level], d_u);
Add(d_v, d_dv, pH[level] * pS[level], d_v);
}
// Upscale
if (level > 0)
{
float scale = fScale;
Upscale(d_u, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_us);
Upscale(d_v, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_vs);
Upscale(d_w, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_ws);
Swap(d_u, d_us);
Swap(d_v, d_vs);
Swap(d_w, d_ws);
}
}
Clone(d_w, width, height, stride, d_wForward);
if (visualizeResults) {
FlowToHSV(d_u, d_v, width, height, stride, d_uvrgb, flowScale);
}
return 0;
}
int Stereo::solveStereoBackward() {
// Warp i1 using vector fields
//WarpImage(pI1[0], width, height, stride, d_cvx, d_cvy, d_i1calibrated);
//Swap(pI1[0], d_i1calibrated);
Swap(pI0[0], pI1[0]);
checkCudaErrors(cudaMemset(d_w, 0, dataSize));
checkCudaErrors(cudaMemset(d_u, 0, dataSize));
checkCudaErrors(cudaMemset(d_v, 0, dataSize));
// Construct pyramid
for (int level = 1; level < nLevels; level++) {
Swap(pI0[level], pI1[level]);
}
//planeSweepBackward();
//Clone(ps_disparity, pW[planeSweepLevel], pH[planeSweepLevel], pS[planeSweepLevel], d_w);
ComputeOpticalFlowVector(d_w, d_tvxBackward, d_tvyBackward, pW[0], pH[0], pS[0], d_u, d_v);
/*cv::Mat calibrated = cv::Mat(height, stride, CV_32F);
checkCudaErrors(cudaMemcpy((float *)calibrated.ptr(), ps_disparity, width * height * sizeof(float), cudaMemcpyDeviceToHost));
cv::imshow("calibrated", calibrated/(float)planeSweepMaxDisparity);*/
// Solve stereo
for (int level = nLevels - 1; level >= 0; level--) {
for (int warpIter = 0; warpIter < nWarpIters; warpIter++) {
// Compute U,V from W (d_w is the disparity magnitude along the vector field d_tvx, d_tvy)
// Warp using U,V
//std::cout << "entered" << std::endl;
checkCudaErrors(cudaMemset(d_du, 0, dataSize));
checkCudaErrors(cudaMemset(d_dv, 0, dataSize));
checkCudaErrors(cudaMemset(d_dw, 0, dataSize));
checkCudaErrors(cudaMemset(d_dws, 0, dataSize));
checkCudaErrors(cudaMemset(d_dwmed, 0, dataSize));
checkCudaErrors(cudaMemset(d_dwmeds, 0, dataSize));
checkCudaErrors(cudaMemset(d_pw1, 0, dataSize));
checkCudaErrors(cudaMemset(d_pw2, 0, dataSize));
FindWarpingVector(d_u, d_v, pTvxBackward[level], pTvyBackward[level],
pW[level], pH[level], pS[level], d_tvx2, d_tvy2);
WarpImage(pI1[level], pW[level], pH[level], pS[level], d_u, d_v, d_i1warp);
//std::cout << pW[level] << " " << pH[level] << " " << pS[level] << std::endl;
ComputeDerivativesFisheye(pI0[level], d_i1warp, pTvxBackward[level], pTvyBackward[level],
pW[level], pH[level], pS[level], d_Iw, d_Iz);
/*if (level == 0) {
cv::Mat calibrated = cv::Mat(pH[level], pS[level], CV_32F);
checkCudaErrors(cudaMemcpy((float *)calibrated.ptr(), d_i1warp, pS[level] * pH[level] * sizeof(float), cudaMemcpyDeviceToHost));
cv::imshow("gradient", calibrated);
}*/
// Inner iteration
for (int iter = 0; iter < nSolverIters; ++iter)
{
SolveDataL1Stereo(d_dwmed,
d_pw1, d_pw2,
d_Iw, d_Iz,
pW[level], pH[level], pS[level],
lambda, theta,
d_dwmeds); //du1 = duhat output
Swap(d_dwmed, d_dwmeds);
SolveSmoothDualTVGlobalStereo(d_dwmed,
d_pw1, d_pw2,
pW[level], pH[level], pS[level],
tau, theta,
d_pw1s, d_pw2s);
Swap(d_pw1, d_pw1s);
Swap(d_pw2, d_pw2s);
}
// Sanity Check: Limit disparity to 1
LimitRange(d_dwmed, 1.0f, pW[level], pH[level], pS[level], d_dwmeds);
Swap(d_dwmed, d_dwmeds);
//// One median filtering
MedianFilterDisparity(d_dwmed, pW[level], pH[level], pS[level],
d_dwmeds, 5);
Swap(d_dwmed, d_dwmeds);
//// Calculate d_du, d_dv
ComputeOpticalFlowVector(d_dwmed, d_tvx2, d_tvy2, pW[level], pH[level], pS[level], d_du, d_dv);
//// update w, u, v
Add(d_w, d_dwmed, pH[level] * pS[level], d_w);
Add(d_u, d_du, pH[level] * pS[level], d_u);
Add(d_v, d_dv, pH[level] * pS[level], d_v);
}
// Upscale
if (level > 0)
{
float scale = fScale;
Upscale(d_u, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_us);
Upscale(d_v, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_vs);
Upscale(d_w, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_ws);
Swap(d_u, d_us);
Swap(d_v, d_vs);
Swap(d_w, d_ws);
}
}
Clone(d_w, width, height, stride, d_wBackward);
if (visualizeResults) {
FlowToHSV(d_u, d_v, width, height, stride, d_uvrgb, flowScale);
}
return 0;
}
int Stereo::occlusionCheck(float threshold) {
isOcclusionChecked = true;
// Get wFinal
OcclusionCheck(d_wForward, d_wBackward, threshold, d_uForward, d_vForward, width, height, stride, d_wFinal);
return 0;
}
int Stereo::planeSweepForward() {
// Plane sweep on level=1
int planeSweepLevel = 0;
checkCudaErrors(cudaMemset(ps_error, 0, dataSize));
checkCudaErrors(cudaMemset(ps_depth, 0, dataSize));
checkCudaErrors(cudaMemset(ps_disparity, 0, dataSize));
Clone(pI1[planeSweepLevel], pW[planeSweepLevel], pH[planeSweepLevel], pS[planeSweepLevel], ps_i1warp);
SetValue(ps_error, planeSweepMaxError, pW[planeSweepLevel], pH[planeSweepLevel], pS[planeSweepLevel]);
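// Each sweep step warps i1 one more increment along the translation field and
// correlates it with i0 over a planeSweepWindow patch; ps_error starts at
// planeSweepMaxError so that the lowest-error disparity per pixel can be kept.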
for (int sweep = 0; sweep < planeSweepMaxDisparity; sweep += planeSweepStride) {
PlaneSweepCorrelation(ps_i1warp, pI0[planeSweepLevel], ps_disparity, sweep, planeSweepWindow,
pW[planeSweepLevel], pH[planeSweepLevel], pS[planeSweepLevel], ps_error);
for (int psStride = 0; psStride < planeSweepStride; psStride++) {
WarpImage(ps_i1warp, pW[planeSweepLevel], pH[planeSweepLevel], pS[planeSweepLevel], d_tvxForward, d_tvyForward, ps_i1warps);
Swap(ps_i1warp, ps_i1warps);
}
}
//Clone(ps_disparity, pW[planeSweepLevel], pH[planeSweepLevel], pS[planeSweepLevel], d_w);
return 0;
}
int Stereo::planeSweepBackward() {
// Plane sweep on level=1
int planeSweepLevel = 0;
checkCudaErrors(cudaMemset(ps_error, 0, dataSize));
checkCudaErrors(cudaMemset(ps_depth, 0, dataSize));
checkCudaErrors(cudaMemset(ps_disparity, 0, dataSize));
Clone(pI1[planeSweepLevel], pW[planeSweepLevel], pH[planeSweepLevel], pS[planeSweepLevel], ps_i1warp);
SetValue(ps_error, planeSweepMaxError, pW[planeSweepLevel], pH[planeSweepLevel], pS[planeSweepLevel]);
for (int sweep = 0; sweep < planeSweepMaxDisparity; sweep += planeSweepStride) {
PlaneSweepCorrelation(ps_i1warp, pI0[planeSweepLevel], ps_disparity, sweep, planeSweepWindow,
pW[planeSweepLevel], pH[planeSweepLevel], pS[planeSweepLevel], ps_error);
for (int psStride = 0; psStride < planeSweepStride; psStride++) {
WarpImage(ps_i1warp, pW[planeSweepLevel], pH[planeSweepLevel], pS[planeSweepLevel],
d_tvxBackward, d_tvyBackward, ps_i1warps);
Swap(ps_i1warp, ps_i1warps);
}
}
return 0;
}
int Stereo::planeSweepOcclusionCheck() {
isPlaneSweepOcclusionChecked = true;
// Get wFinal
return 0;
}
int Stereo::copyStereoToHost(cv::Mat &wCropped) {
// Convert Disparity to Depth
if (isOcclusionChecked) {
ConvertDisparityToDepth(d_wFinal, baseline, focal, width, height, stride, d_depth);
}
else {
ConvertDisparityToDepth(d_w, baseline, focal, width, height, stride, d_depth);
}
// Remove Padding
//checkCudaErrors(cudaMemcpy((float *)depth.ptr(), d_w, stride * height * sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy((float *)depth.ptr(), d_depth, stride * height * sizeof(float), cudaMemcpyDeviceToHost));
cv::Rect roi(0, 0, width, height); // define roi here as x0, y0, width, height
wCropped = depth(roi);
return 0;
}
int Stereo::copyPlaneSweepToHost(cv::Mat &ps) {
// Convert Disparity to Depth
if (isPlaneSweepOcclusionChecked) {
ConvertDisparityToDepth(ps_disparityFinal, baseline, focal, width, height, stride, ps_depth);
}
else {
ConvertDisparityToDepth(ps_disparity, baseline, focal, width, height, stride, ps_depth);
}
// Remove Padding
//checkCudaErrors(cudaMemcpy((float *)depth.ptr(), d_w, stride * height * sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy((float *)planeSweepDepth.ptr(), ps_depth, stride * height * sizeof(float), cudaMemcpyDeviceToHost));
cv::Rect roi(0, 0, width, height); // define roi here as x0, y0, width, height
ps = planeSweepDepth(roi);
return 0;
}
int Stereo::solveOpticalFlow() {
// construct pyramid
for (int level = 1; level < nLevels; level++) {
Downscale(pI0[level - 1],
pW[level - 1], pH[level - 1], pS[level - 1],
pW[level], pH[level], pS[level],
pI0[level]);
Downscale(pI1[level - 1],
pW[level - 1], pH[level - 1], pS[level - 1],
pW[level], pH[level], pS[level],
pI1[level]);
}
// solve flow
checkCudaErrors(cudaMemset(d_u, 0, dataSize));
checkCudaErrors(cudaMemset(d_v, 0, dataSize));
for (int level = nLevels - 1; level >= 0; level--) {
for (int warpIter = 0; warpIter < nWarpIters; warpIter++) {
//std::cout << level << std::endl;
//initialize zeros
checkCudaErrors(cudaMemset(d_du, 0, dataSize));
checkCudaErrors(cudaMemset(d_dv, 0, dataSize));
checkCudaErrors(cudaMemset(d_dus, 0, dataSize));
checkCudaErrors(cudaMemset(d_dvs, 0, dataSize));
checkCudaErrors(cudaMemset(d_dumed, 0, dataSize));
checkCudaErrors(cudaMemset(d_dvmed, 0, dataSize));
checkCudaErrors(cudaMemset(d_dumeds, 0, dataSize));
checkCudaErrors(cudaMemset(d_dvmeds, 0, dataSize));
checkCudaErrors(cudaMemset(d_pu1, 0, dataSize));
checkCudaErrors(cudaMemset(d_pu2, 0, dataSize));
checkCudaErrors(cudaMemset(d_pv1, 0, dataSize));
checkCudaErrors(cudaMemset(d_pv2, 0, dataSize));
//warp frame 1
WarpImage(pI1[level], pW[level], pH[level], pS[level], d_u, d_v, d_i1warp);
ComputeDerivatives(pI0[level], d_i1warp, pW[level], pH[level], pS[level], d_Ix, d_Iy, d_Iz);
//inner iteration
for (int iter = 0; iter < nSolverIters; ++iter)
{
SolveDataL1(d_dumed, d_dvmed,
d_pu1, d_pu2,
d_pv1, d_pv2,
d_Ix, d_Iy, d_Iz,
pW[level], pH[level], pS[level],
lambda, theta,
d_dumeds, d_dvmeds); //du1 = duhat output
Swap(d_dumed, d_dumeds);
Swap(d_dvmed, d_dvmeds);
SolveSmoothDualTVGlobal(d_dumed, d_dvmed,
d_pu1, d_pu2, d_pv1, d_pv2,
pW[level], pH[level], pS[level],
tau, theta,
d_pu1s, d_pu2s, d_pv1s, d_pv2s);
Swap(d_pu1, d_pu1s);
Swap(d_pu2, d_pu2s);
Swap(d_pv1, d_pv1s);
Swap(d_pv2, d_pv2s);
//***********************************
/*MedianFilter(d_dumed, d_dvmed, pW[level], pH[level], pS[level],
d_dumeds, d_dvmeds, 5);
Swap(d_dumed, d_dumeds);
Swap(d_dvmed, d_dvmeds);*/
}
// one median filtering
MedianFilter(d_dumed, d_dvmed, pW[level], pH[level], pS[level],
d_dumeds, d_dvmeds, 5);
Swap(d_dumed, d_dumeds);
Swap(d_dvmed, d_dvmeds);
// update u, v
Add(d_u, d_dumed, pH[level] * pS[level], d_u);
Add(d_v, d_dvmed, pH[level] * pS[level], d_v);
/*
MedianFilter(d_u, d_v, pW[level], pH[level], pS[level],
d_dumeds, d_dvmeds, 5);
Swap(d_u, d_dumeds);
Swap(d_v, d_dvmeds);*/
}
//upscale
if (level > 0)
{
// scale uv
//float scale = (float)pW[level + 1] / (float)pW[level];
float scale = fScale;
Upscale(d_u, pW[level], pH[level], pS[level],
pW[level - 1], pH[level - 1], pS[level - 1], scale, d_us);
//float scaleY = (float)pH[level + 1] / (float)pH[level];
Upscale(d_v, pW[level], pH[level], pS[level],
pW[level - 1], pH[level - 1], pS[level - 1], scale, d_vs);
Swap(d_u, d_us);
Swap(d_v, d_vs);
}
}
if (visualizeResults) {
FlowToHSV(d_u, d_v, width, height, stride, d_uvrgb, flowScale);
}
//FlowToHSV(d_u, d_v, width, height, stride, d_uvrgb, flowScale);
//SolveSceneFlow(d_u, d_v, d_depth016u, d_depth116u, width, height, stride, d_sceneflow);
//std::cout << stride << " " << height << " " << height << " " << inputChannels << std::endl;
return 0;
}
int Stereo::copyOpticalFlowVisToHost(cv::Mat &uvrgbCropped) {
// Remove Padding
checkCudaErrors(cudaMemcpy((float3 *)uvrgb.ptr(), d_uvrgb, width * height * sizeof(float) * 3, cudaMemcpyDeviceToHost));
cv::Rect roi(0, 0, width, height); // define roi here as x0, y0, width, height
uvrgbCropped = uvrgb(roi);
return 0;
}
int Stereo::copyOpticalFlowToHost(cv::Mat &u, cv::Mat &v) {
checkCudaErrors(cudaMemcpy((float *)upad.ptr(), d_u, stride * height * sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy((float *)vpad.ptr(), d_v, stride * height * sizeof(float), cudaMemcpyDeviceToHost));
// Remove Padding
cv::Rect roi(0, 0, width, height); // define roi here as x0, y0, width, height
u = upad(roi);
v = vpad(roi);
return 0;
}
// Align up n to the nearest multiple of m
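// Example (assuming a hypothetical StrideAlignment of 32): iAlignUp(1000) == 1024 and iAlignUp(1024) == 1024.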
inline int Stereo::iAlignUp(int n)
{
int m = this->StrideAlignment;
int mod = n % m;
if (mod)
return n + m - mod;
else
return n;
}
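// Number of m-sized blocks needed to cover n elements, e.g. iDivUp(1000, 256) == 4.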
int Stereo::iDivUp(int n, int m)
{
return (n + m - 1) / m;
}
// swap two values
template<typename T>
inline void Stereo::Swap(T &a, T &ax)
{
T t = a;
a = ax;
ax = t;
}
//swap four values
template<typename T>
inline void Stereo::Swap(T &a, T &ax, T &b, T &bx)
{
Swap(a, ax);
Swap(b, bx);
}
//swap eight values
template<typename T>
inline void Stereo::Swap(T &a, T &ax, T &b, T &bx, T &c, T &cx, T &d, T &dx)
{
Swap(a, ax);
Swap(b, bx);
Swap(c, cx);
Swap(d, dx);
}
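// Counts how many pyramid levels fit before the repeatedly downscaled height drops below minWidth.
// Example (hypothetical values): height = 480, minWidth = 80, scale = 2.0f gives 3 levels (240 and 120 still pass, 60 stops the loop).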
int Stereo::computePyramidLevels(int width, int height, int minWidth, float scale) {
int nLevels = 1;
int pHeight = (int)((float)height / scale);
while (pHeight > minWidth) {
nLevels++;
pHeight = (int)((float)pHeight / scale);
}
std::cout << "Pyramid Levels: " << nLevels << std::endl;
return nLevels;
}
int Stereo::initializeColorWheel() {
checkCudaErrors(cudaMalloc(&d_colorwheel, 55 * 3 * sizeof(float)));
float colorwheel[165] = { 255, 0, 0,
255, 17, 0,
255, 34, 0,
255, 51, 0,
255, 68, 0,
255, 85, 0,
255, 102, 0,
255, 119, 0,
255, 136, 0,
255, 153, 0,
255, 170, 0,
255, 187, 0,
255, 204, 0,
255, 221, 0,
255, 238, 0,
255, 255, 0,
213, 255, 0,
170, 255, 0,
128, 255, 0,
85, 255, 0,
43, 255, 0,
0, 255, 0,
0, 255, 63,
0, 255, 127,
0, 255, 191,
0, 255, 255,
0, 232, 255,
0, 209, 255,
0, 186, 255,
0, 163, 255,
0, 140, 255,
0, 116, 255,
0, 93, 255,
0, 70, 255,
0, 47, 255,
0, 24, 255,
0, 0, 255,
19, 0, 255,
39, 0, 255,
58, 0, 255,
78, 0, 255,
98, 0, 255,
117, 0, 255,
137, 0, 255,
156, 0, 255,
176, 0, 255,
196, 0, 255,
215, 0, 255,
235, 0, 255,
255, 0, 255,
255, 0, 213,
255, 0, 170,
255, 0, 128,
255, 0, 85,
255, 0, 43 };
checkCudaErrors(cudaMemcpy(d_colorwheel, colorwheel, 55 * 3 * sizeof(float), cudaMemcpyHostToDevice)); // upload the color wheel to device memory
return 0;
} |
11cfd6f6915fc010444114f195b47d597685ed2b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/gather_op.h"
namespace caffe2 {
template <typename T_INDEX>
__global__ void GatherKernel(
const float* X,
float* Y,
const T_INDEX* indices,
const int N,
const int block_size) {
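// Each block handles output rows i = blockIdx.x, blockIdx.x + gridDim.x, ...;
// within a row, the threads of the block cooperatively copy its block_size floats.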
for (int i = blockIdx.x; i < N; i += gridDim.x) {
T_INDEX idx = indices[i];
const float* src_offset = X + idx * block_size;
float* dst_offset = Y + i * block_size;
for (int j = threadIdx.x; j < block_size; j += blockDim.x) {
dst_offset[j] = src_offset[j];
}
}
}
template <>
bool GatherOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, OperatorBase::Input<TensorCUDA>(INDICES));
}
template <>
template <typename Index>
bool GatherOp<CUDAContext>::DoRunWithType() {
auto& data = Input(DATA);
auto& indices = Input(INDICES);
auto* output = Output(0);
CAFFE_ENFORCE_GE(data.ndim(), 1, "DATA should be at least 1-D");
auto shape = indices.dims();
shape.insert(shape.end(), data.dims().begin() + 1, data.dims().end());
output->Resize(shape);
int block_size = data.size() / data.dim(0);
auto block_bytesize = data.size_from_dim(1) * data.meta().itemsize();
CAFFE_ENFORCE(
block_bytesize == data.nbytes() / data.dim(0),
"block_bytesize should be consistent with data dim");
int N = indices.size();
auto src_base = static_cast<const float*>(data.raw_data());
const Index* idxs = indices.template data<Index>();
auto out = static_cast<float*>(output->raw_mutable_data(data.meta()));
// return early when the input is empty, since CUDA kernel will fail for
// empty input.
if (N <= 0) {
return true;
}
hipLaunchKernelGGL(( GatherKernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), src_base, out, idxs, N, block_size);
return true;
}
REGISTER_CUDA_OPERATOR(Gather, GatherOp<CUDAContext>);
} // namespace caffe2
| 11cfd6f6915fc010444114f195b47d597685ed2b.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/gather_op.h"
namespace caffe2 {
template <typename T_INDEX>
__global__ void GatherKernel(
const float* X,
float* Y,
const T_INDEX* indices,
const int N,
const int block_size) {
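// Each block handles output rows i = blockIdx.x, blockIdx.x + gridDim.x, ...;
// within a row, the threads of the block cooperatively copy its block_size floats.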
for (int i = blockIdx.x; i < N; i += gridDim.x) {
T_INDEX idx = indices[i];
const float* src_offset = X + idx * block_size;
float* dst_offset = Y + i * block_size;
for (int j = threadIdx.x; j < block_size; j += blockDim.x) {
dst_offset[j] = src_offset[j];
}
}
}
template <>
bool GatherOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, OperatorBase::Input<TensorCUDA>(INDICES));
}
template <>
template <typename Index>
bool GatherOp<CUDAContext>::DoRunWithType() {
auto& data = Input(DATA);
auto& indices = Input(INDICES);
auto* output = Output(0);
CAFFE_ENFORCE_GE(data.ndim(), 1, "DATA should be at least 1-D");
auto shape = indices.dims();
shape.insert(shape.end(), data.dims().begin() + 1, data.dims().end());
output->Resize(shape);
int block_size = data.size() / data.dim(0);
auto block_bytesize = data.size_from_dim(1) * data.meta().itemsize();
CAFFE_ENFORCE(
block_bytesize == data.nbytes() / data.dim(0),
"block_bytesize should be consistent with data dim");
int N = indices.size();
auto src_base = static_cast<const float*>(data.raw_data());
const Index* idxs = indices.template data<Index>();
auto out = static_cast<float*>(output->raw_mutable_data(data.meta()));
// return early when the input is empty, since CUDA kernel will fail for
// empty input.
if (N <= 0) {
return true;
}
GatherKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(src_base, out, idxs, N, block_size);
return true;
}
REGISTER_CUDA_OPERATOR(Gather, GatherOp<CUDAContext>);
} // namespace caffe2
|
261ace75ae2071b0612736bc45f03d6287a02cbd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
//implement the kernel
__global__ void reverseArrayBlock(int *out_d, int *in_d)
{
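//block i reads its own block of the input and writes it into the mirrored block
//(gridDim.x - 1 - i) of the output, with threads mirrored inside the block,
//so the array as a whole comes out reversed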
int inOffset = blockDim.x * blockIdx.x;
int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x);
int in = inOffset + threadIdx.x;
int out = outOffset + (blockDim.x - 1 - threadIdx.x);
out_d[out] = in_d[in];
}
//program main
int main(int argc, char** argv)
{
//pointer to host memory and size
int *a_h;
int dimA = 256 * 1024; // 256K elements (1MB total)
//pointer to device memory
int *b_d;
int *a_d;
//define grid and block size
int numThreads = 256;
//compute number of blocks needed based on array size and desired block size
int numBlocks = dimA / numThreads;
//allocate host and device memory
size_t memSize = numBlocks * numThreads * sizeof(int);
a_h = (int *) malloc(memSize);
hipMalloc((void **) &a_d, memSize);
hipMalloc((void **) &b_d, memSize);
//initialise input array on host
for(int i = 0; i < dimA; ++i)
{
a_h[i] = i;
}
//copy host array to device array
hipMemcpy(a_d, a_h, memSize, hipMemcpyHostToDevice);
//launch kernel
hipLaunchKernelGGL(( reverseArrayBlock), dim3(numBlocks), dim3(numThreads), 0, 0, b_d, a_d);
//device to host copy
hipMemcpy(a_h, b_d, memSize, hipMemcpyDeviceToHost );
//verify the data returned to the host is correct
for (int i = 0; i < dimA; i++)
{
assert(a_h[i] == dimA - 1 - i);
}
//free device memory
hipFree(a_d);
hipFree(b_d);
//free host memory
free(a_h);
//if the program makes it this far, then the results are correct and there are no run-time errors
printf("Correct!\n");
return 0;
} | 261ace75ae2071b0612736bc45f03d6287a02cbd.cu | #include <stdio.h>
#include <assert.h>
//implement the kernel
__global__ void reverseArrayBlock(int *out_d, int *in_d)
{
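//block i reads its own block of the input and writes it into the mirrored block
//(gridDim.x - 1 - i) of the output, with threads mirrored inside the block,
//so the array as a whole comes out reversed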
int inOffset = blockDim.x * blockIdx.x;
int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x);
int in = inOffset + threadIdx.x;
int out = outOffset + (blockDim.x - 1 - threadIdx.x);
out_d[out] = in_d[in];
}
//program main
int main(int argc, char** argv)
{
//pointer to host memory and size
int *a_h;
int dimA = 256 * 1024; // 256K elements (1MB total)
//pointer to device memory
int *b_d;
int *a_d;
//define grid and block size
int numThreads = 256;
//compute number of blocks needed based on array size and desired block size
int numBlocks = dimA / numThreads;
//allocate host and device memory
size_t memSize = numBlocks * numThreads * sizeof(int);
a_h = (int *) malloc(memSize);
cudaMalloc((void **) &a_d, memSize);
cudaMalloc((void **) &b_d, memSize);
//initialise input array on host
for(int i = 0; i < dimA; ++i)
{
a_h[i] = i;
}
//copy host array to device array
cudaMemcpy(a_d, a_h, memSize, cudaMemcpyHostToDevice);
//launch kernel
reverseArrayBlock<<<numBlocks, numThreads>>>(b_d, a_d);
//device to host copy
cudaMemcpy(a_h, b_d, memSize, cudaMemcpyDeviceToHost );
//verify the data returned to the host is correct
for (int i = 0; i < dimA; i++)
{
assert(a_h[i] == dimA - 1 - i);
}
//free device memory
cudaFree(a_d);
cudaFree(b_d);
//free host memory
free(a_h);
//if the program makes it this far, then the results are correct and there are no run-time errors
printf("Correct!\n");
return 0;
} |
9121e0ea4fc2889aa031c9b61723696144d90e9e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
////////////////////////////////////////////////////////////////////////////////
// Includes
#include <stdio.h>
#include <cutil.h>
////////////////////////////////////////////////////////////////////////////////
// Static variable declarations
// Initial execution configuration
#define NUM_THREADS_PER_BLOCK 512
#define NUM_BLOCKS 4
#define NUM_THREADS (NUM_THREADS_PER_BLOCK * NUM_BLOCKS)
// Result buffer
static int* d_Result;
////////////////////////////////////////////////////////////////////////////////
// Forward declarations
__global__ void reduce_kernel(const int*, unsigned int, int*);
////////////////////////////////////////////////////////////////////////////////
//! Allocate any necessary device memory.
////////////////////////////////////////////////////////////////////////////////
extern "C" void reduceAllocate()
{
CUDA_SAFE_CALL(hipMalloc((void**)&d_Result, NUM_BLOCKS * sizeof(int)));
}
////////////////////////////////////////////////////////////////////////////////
//! Free device memory.
////////////////////////////////////////////////////////////////////////////////
extern "C" void reduceFree()
{
CUDA_SAFE_CALL(hipFree(d_Result));
}
////////////////////////////////////////////////////////////////////////////////
//! Compute the sum of all values from the input array through parallel
//! reduction.
//! @param values input array of values
//! @param numValues number of values
////////////////////////////////////////////////////////////////////////////////
extern "C" int* reduce(const int* values, unsigned int numValues)
{
// Execution configuration
int numThreadsPerBlock = NUM_THREADS_PER_BLOCK;
int numBlocks = NUM_BLOCKS;
// The first pass reduces the input array to an array of size equal to the
// total number of blocks in the grid
int sharedMemorySize = numThreadsPerBlock * sizeof(int);
hipLaunchKernelGGL(( reduce_kernel), dim3(numBlocks), dim3(numThreadsPerBlock), sharedMemorySize, 0,
values, numValues, d_Result);
CUT_CHECK_ERROR("Kernel execution failed");
// The second pass launches only one block to perform the final reduction
numThreadsPerBlock = numBlocks;
numBlocks = 1;
sharedMemorySize = numThreadsPerBlock * sizeof(int);
hipLaunchKernelGGL(( reduce_kernel), dim3(numBlocks), dim3(numThreadsPerBlock), sharedMemorySize, 0,
d_Result, numThreadsPerBlock, d_Result);
return d_Result;
}
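// Sketch of a typical call sequence (assumptions: `values` must already point to device
// memory, and `d_values`/`numValues` below are placeholder names). After the second pass,
// element 0 of the returned device buffer holds the final sum:
//   reduceAllocate();
//   int* d_sum = reduce(d_values, numValues);
//   int sum;
//   CUDA_SAFE_CALL(hipMemcpy(&sum, d_sum, sizeof(int), hipMemcpyDeviceToHost));
//   reduceFree();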
////////////////////////////////////////////////////////////////////////////////
//! Reduce an array of input values
//! @param valuesIn array of input values
//! @param numValues number of input values
//! @param valuesOut array of reduced values
////////////////////////////////////////////////////////////////////////////////
__global__ void reduce_kernel(const int* valuesIn, unsigned int numValues,
int* valuesOut)
{
// Execution configuration
int numBlocks = gridDim.x;
int numThreadsPerBlock = blockDim.x;
int numThreads = numBlocks * numThreadsPerBlock;
// Index in the grid
int blockID = blockIdx.x;
int threadID = threadIdx.x;
int index = blockID * numThreadsPerBlock + threadID;
// Each thread processes as many values as necessary with a stride equal to
// the total number of threads in the grid and stores the result in shared
// memory
extern __shared__ int sresult[];
//sresult[0 /* TODO */] = 0;
sresult[threadID] = 0;
for (int i = index; i < numValues; i += numThreads)
//sresult[0 /* TODO */] += valuesIn[i];
sresult[threadID] += valuesIn[i];
// Parallel reduction
//for (int stride = 1; stride < 0 /* TODO */; stride *= 2) {
for (int stride = 1; stride < numThreadsPerBlock; stride *= 2) {
// Synchronize threads to make sure each thread is done updating shared
// memory; this is necessary because threads read results that have been
// written by other threads
/* TODO */;
__syncthreads();
// Check if thread is active during this loop iteration
//if (threadID % (0 /* TODO */) == 0)
if (threadID % (stride * 2) == 0)
// Accumulate one element from sresult into another
//sresult[threadID] += sresult[0 /* TODO */];
sresult[threadID] += sresult[threadID + stride];
}
// Thread 0 of each block writes the final result of the reduction to
// device memory
if (threadID == 0)
//valuesOut[0 /* TODO */] = sresult[0];
valuesOut[blockID] = sresult[0];
}
| 9121e0ea4fc2889aa031c9b61723696144d90e9e.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
////////////////////////////////////////////////////////////////////////////////
// Includes
#include <stdio.h>
#include <cutil.h>
////////////////////////////////////////////////////////////////////////////////
// Static variable declarations
// Initial execution configuration
#define NUM_THREADS_PER_BLOCK 512
#define NUM_BLOCKS 4
#define NUM_THREADS (NUM_THREADS_PER_BLOCK * NUM_BLOCKS)
// Result buffer
static int* d_Result;
////////////////////////////////////////////////////////////////////////////////
// Forward declarations
__global__ void reduce_kernel(const int*, unsigned int, int*);
////////////////////////////////////////////////////////////////////////////////
//! Allocate any necessary device memory.
////////////////////////////////////////////////////////////////////////////////
extern "C" void reduceAllocate()
{
CUDA_SAFE_CALL(cudaMalloc((void**)&d_Result, NUM_BLOCKS * sizeof(int)));
}
////////////////////////////////////////////////////////////////////////////////
//! Free device memory.
////////////////////////////////////////////////////////////////////////////////
extern "C" void reduceFree()
{
CUDA_SAFE_CALL(cudaFree(d_Result));
}
////////////////////////////////////////////////////////////////////////////////
//! Compute the sum of all values from the input array through parallel
//! reduction.
//! @param values input array of values
//! @param numValues number of values
////////////////////////////////////////////////////////////////////////////////
extern "C" int* reduce(const int* values, unsigned int numValues)
{
// Execution configuration
int numThreadsPerBlock = NUM_THREADS_PER_BLOCK;
int numBlocks = NUM_BLOCKS;
// The first pass reduces the input array to an array of size equal to the
// total number of blocks in the grid
int sharedMemorySize = numThreadsPerBlock * sizeof(int);
reduce_kernel<<<numBlocks, numThreadsPerBlock, sharedMemorySize>>>
(values, numValues, d_Result);
CUT_CHECK_ERROR("Kernel execution failed");
// The second pass launches only one block to perform the final reduction
numThreadsPerBlock = numBlocks;
numBlocks = 1;
sharedMemorySize = numThreadsPerBlock * sizeof(int);
reduce_kernel<<<numBlocks, numThreadsPerBlock, sharedMemorySize>>>
(d_Result, numThreadsPerBlock, d_Result);
return d_Result;
}
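// Sketch of a typical call sequence (assumptions: `values` must already point to device
// memory, and `d_values`/`numValues` below are placeholder names). After the second pass,
// element 0 of the returned device buffer holds the final sum:
//   reduceAllocate();
//   int* d_sum = reduce(d_values, numValues);
//   int sum;
//   CUDA_SAFE_CALL(cudaMemcpy(&sum, d_sum, sizeof(int), cudaMemcpyDeviceToHost));
//   reduceFree();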
////////////////////////////////////////////////////////////////////////////////
//! Reduce an array of input values
//! @param valuesIn array of input values
//! @param numValues number of input values
//! @param valuesOut array of reduced values
////////////////////////////////////////////////////////////////////////////////
__global__ void reduce_kernel(const int* valuesIn, unsigned int numValues,
int* valuesOut)
{
// Execution configuration
int numBlocks = gridDim.x;
int numThreadsPerBlock = blockDim.x;
int numThreads = numBlocks * numThreadsPerBlock;
// Index in the grid
int blockID = blockIdx.x;
int threadID = threadIdx.x;
int index = blockID * numThreadsPerBlock + threadID;
// Each thread processes as many values as necessary with a stride equal to
// the total number of threads in the grid and stores the result in shared
// memory
extern __shared__ int sresult[];
//sresult[0 /* TODO */] = 0;
sresult[threadID] = 0;
for (int i = index; i < numValues; i += numThreads)
//sresult[0 /* TODO */] += valuesIn[i];
sresult[threadID] += valuesIn[i];
// Parallel reduction
//for (int stride = 1; stride < 0 /* TODO */; stride *= 2) {
for (int stride = 1; stride < numThreadsPerBlock; stride *= 2) {
// Synchronize threads to make sure each thread is done updating shared
// memory; this is necessary because threads read results that have been
// written by other threads
/* TODO */;
__syncthreads();
// Check if thread is active during this loop iteration
//if (threadID % (0 /* TODO */) == 0)
if (threadID % (stride * 2) == 0)
// Accumulate one element from sresult into another
//sresult[threadID] += sresult[0 /* TODO */];
sresult[threadID] += sresult[threadID + stride];
}
// Thread 0 of each block writes the final result of the reduction to
// device memory
if (threadID == 0)
//valuesOut[0 /* TODO */] = sresult[0];
valuesOut[blockID] = sresult[0];
}
|
a7915d70a45fb7cfadc23a9eab5041721fd6d22f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
#include "RayTracing.h"
#include "SphereCreator.h"
#include <assert.h>
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void rayTracing(uchar4* ptrDevPixels, Sphere* ptrDevTabSphere, int nbSphere,uint w, uint h,float t);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructor *|
\*-------------------------*/
RayTracing::RayTracing(const Grid& grid, uint w, uint h, float dt) :
Animable_I<uchar4>(grid, w, h, "RayTracing_Cuda_RGBA_uchar4")
{
// Inputs
this->dt = dt;
this->nbSphere = 35;
SphereCreator shereCreator(this->nbSphere, w, h); // on the stack
Sphere* ptrTabSphere = shereCreator.getTabSphere();
this->sizeOctetSphere = sizeof(Sphere) * this->nbSphere;
// transfer to GM
toGM(ptrTabSphere);
// transfer to CM
toCM(ptrTabSphere); // to be implemented
// Tools
this->t = 0; // protected in Animable
}
RayTracing::~RayTracing()
{
// nothing to do
}
/*-------------------------*\
|* Methods *|
\*-------------------------*/
/**
* Override
* Called periodically by the API
*
* Note: domaineMath is not used because this view is not zoomable
*/
void RayTracing::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
//Device::lastCudaError("RayTracing rgba uchar4 (before kernel)"); // optional, for debug only, remove for release
// TODO: launch the kernel with <<<dg,db>>>
// the kernel is imported above (line 19)
hipLaunchKernelGGL(( rayTracing), dim3(dg),dim3(db), 0, 0, ptrDevPixels,this->ptrDevTabSphere, this->nbSphere,w, h, t);
//Device::lastCudaError("RayTracing rgba uchar4 (after kernel)"); // optional, for debug only, remove for release
}
void RayTracing::toGM(Sphere* ptrSphere)
{
Device::malloc(&this->ptrDevTabSphere, sizeOctetSphere);
Device::memcpyHToD(this->ptrDevTabSphere, ptrSphere, sizeOctetSphere);
}
void RayTracing::toCM(Sphere* ptrDevSphereCreator)
{
}
/**
* Override
* Called periodically by the API
*/
void RayTracing::animationStep()
{
t += dt;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| a7915d70a45fb7cfadc23a9eab5041721fd6d22f.cu | #include <iostream>
#include <assert.h>
#include "Device.h"
#include "RayTracing.h"
#include "SphereCreator.h"
#include <assert.h>
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void rayTracing(uchar4* ptrDevPixels, Sphere* ptrDevTabSphere, int nbSphere,uint w, uint h,float t);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructor *|
\*-------------------------*/
RayTracing::RayTracing(const Grid& grid, uint w, uint h, float dt) :
Animable_I<uchar4>(grid, w, h, "RayTracing_Cuda_RGBA_uchar4")
{
// Inputs
this->dt = dt;
this->nbSphere = 35;
SphereCreator shereCreator(this->nbSphere, w, h); // on the stack
Sphere* ptrTabSphere = shereCreator.getTabSphere();
this->sizeOctetSphere = sizeof(Sphere) * this->nbSphere;
// transfer to GM
toGM(ptrTabSphere);
// transfer to CM
toCM(ptrTabSphere); // to be implemented
// Tools
this->t = 0; // protected in Animable
}
RayTracing::~RayTracing()
{
// nothing to do
}
/*-------------------------*\
|* Methods *|
\*-------------------------*/
/**
* Override
* Called periodically by the API
*
* Note: domaineMath is not used because this view is not zoomable
*/
void RayTracing::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
//Device::lastCudaError("RayTracing rgba uchar4 (before kernel)"); // optional, for debug only, remove for release
// TODO: launch the kernel with <<<dg,db>>>
// the kernel is imported above (line 19)
rayTracing<<<dg,db>>>(ptrDevPixels,this->ptrDevTabSphere, this->nbSphere,w, h, t);
//Device::lastCudaError("RayTracing rgba uchar4 (after kernel)"); // optional, for debug only, remove for release
}
void RayTracing::toGM(Sphere* ptrSphere)
{
Device::malloc(&this->ptrDevTabSphere, sizeOctetSphere);
Device::memcpyHToD(this->ptrDevTabSphere, ptrSphere, sizeOctetSphere);
}
void RayTracing::toCM(Sphere* ptrDevSphereCreator)
{
}
/**
* Override
* Called periodically by the API
*/
void RayTracing::animationStep()
{
t += dt;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
8de711b0850368b13bb794e4cfef1892ba47b293.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/extension.h"
template <typename data_t>
__global__ void relu_cuda_forward_kernel(const data_t* x,
data_t* y,
const int num) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
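// Grid-stride loop: each thread handles elements gid, gid + blockDim.x * gridDim.x, ...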
for (int i = gid; i < num; i += blockDim.x * gridDim.x) {
y[i] = max(x[i], static_cast<data_t>(0.));
}
}
template <typename data_t>
__global__ void relu_cuda_backward_kernel(const data_t* dy,
const data_t* y,
data_t* dx,
const int num) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = gid; i < num; i += blockDim.x * gridDim.x) {
dx[i] = dy[i] * (y[i] > 0 ? 1. : 0.);
}
}
std::vector<paddle::Tensor> relu_cuda_forward(const paddle::Tensor& x) {
auto out = paddle::Tensor(paddle::PlaceType::kGPU);
out.reshape(x.shape());
int numel = x.size();
int block = 512;
int grid = (numel + block - 1) / block;
PD_DISPATCH_FLOATING_TYPES(
x.type(), "relu_cuda_forward_kernel", ([&] {
hipLaunchKernelGGL(( relu_cuda_forward_kernel<data_t>), dim3(grid), dim3(block), 0, x.stream(),
x.data<data_t>(), out.mutable_data<data_t>(x.place()), numel);
}));
return {out};
}
std::vector<paddle::Tensor> relu_cuda_backward(const paddle::Tensor& x,
const paddle::Tensor& out,
const paddle::Tensor& grad_out) {
auto grad_x = paddle::Tensor(paddle::PlaceType::kGPU);
grad_x.reshape(x.shape());
int numel = out.size();
int block = 512;
int grid = (numel + block - 1) / block;
PD_DISPATCH_FLOATING_TYPES(
out.type(), "relu_cuda_backward_kernel", ([&] {
hipLaunchKernelGGL(( relu_cuda_backward_kernel<data_t>), dim3(grid), dim3(block), 0, x.stream(),
grad_out.data<data_t>(),
out.data<data_t>(),
grad_x.mutable_data<data_t>(x.place()),
numel);
}));
return {grad_x};
}
| 8de711b0850368b13bb794e4cfef1892ba47b293.cu | // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/extension.h"
template <typename data_t>
__global__ void relu_cuda_forward_kernel(const data_t* x,
data_t* y,
const int num) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
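// Grid-stride loop: each thread handles elements gid, gid + blockDim.x * gridDim.x, ...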
for (int i = gid; i < num; i += blockDim.x * gridDim.x) {
y[i] = max(x[i], static_cast<data_t>(0.));
}
}
template <typename data_t>
__global__ void relu_cuda_backward_kernel(const data_t* dy,
const data_t* y,
data_t* dx,
const int num) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = gid; i < num; i += blockDim.x * gridDim.x) {
dx[i] = dy[i] * (y[i] > 0 ? 1. : 0.);
}
}
std::vector<paddle::Tensor> relu_cuda_forward(const paddle::Tensor& x) {
auto out = paddle::Tensor(paddle::PlaceType::kGPU);
out.reshape(x.shape());
int numel = x.size();
int block = 512;
int grid = (numel + block - 1) / block;
PD_DISPATCH_FLOATING_TYPES(
x.type(), "relu_cuda_forward_kernel", ([&] {
relu_cuda_forward_kernel<data_t><<<grid, block, 0, x.stream()>>>(
x.data<data_t>(), out.mutable_data<data_t>(x.place()), numel);
}));
return {out};
}
std::vector<paddle::Tensor> relu_cuda_backward(const paddle::Tensor& x,
const paddle::Tensor& out,
const paddle::Tensor& grad_out) {
auto grad_x = paddle::Tensor(paddle::PlaceType::kGPU);
grad_x.reshape(x.shape());
int numel = out.size();
int block = 512;
int grid = (numel + block - 1) / block;
PD_DISPATCH_FLOATING_TYPES(
out.type(), "relu_cuda_backward_kernel", ([&] {
relu_cuda_backward_kernel<data_t><<<grid, block, 0, x.stream()>>>(
grad_out.data<data_t>(),
out.data<data_t>(),
grad_x.mutable_data<data_t>(x.place()),
numel);
}));
return {grad_x};
}
|
43ac6d51c97e094e7353f4045541d0e95fb32656.hip | // !!! This is a file automatically generated by hipify!!!
#include <thrust/sort.h>
#include "cudamatrix_types.cuh"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <ctime>
#include <cstring>
#include "hip/hip_runtime.h"
#include "common_functions.h"
#include "sm_20_intrinsics.h"
#include "host_defines.h"
#include <iostream>
#include "hip/hip_texture_types.h"
#include "texture_fetch_functions.h"
#include "builtin_types.h"
#include "cutil.h"
#include "hip/device_functions.h"
# define CUDA_SAFE_KERNEL(call) { \
call; \
hipDeviceSynchronize(); \
hipError_t err = hipGetLastError(); \
if ( hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} }
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(n) \
((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS)
#endif
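// Padding shared-memory indices by (n >> LOG_NUM_BANKS) spreads accesses across
// different banks so the tree-based scan below avoids shared-memory bank conflicts.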
#define PRESCAN_BLOCK_SIZE 512
int scan_level=0;
__global__
void binning_prescan(cudaMatrixui blockoffsets, cudaMatrixui gridoffsets,int nptcls_bin)
{
uint idx = threadIdx.x;
uint gidx = blockIdx.x*blockDim.x+idx;
uint binid = blockIdx.y;
int offset = 1;
const uint n = 2*PRESCAN_BLOCK_SIZE;
__shared__ uint sdata[2*n];
uint low[4];
uint high[4];
// Load data from global memory
for(int i=0;i<4;i++)
{
if((8*gidx+i)<nptcls_bin)
{
low[i] = blockoffsets(8*gidx+i,binid);
}
else
{
low[i] = 0;
}
if((8*gidx+4+i)<nptcls_bin)
{
high[i] = blockoffsets(8*gidx+4+i,binid);
}
else
{
high[i] = 0;
}
}
// Calculate the sum for 2 sets of 4 elements
for(int i=1;i<4;i++)
{
low[i] += low[i-1];
high[i] += high[i-1];
}
int ai = 2*idx;
int bi = 2*idx+1;
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// Load local results into shared memory
sdata[ai+bankOffsetA] = low[3];
sdata[bi+bankOffsetB] = high[3];
for(int d=n>>1;d>0;d>>=1) // build sum in place up the tree
{
__syncthreads();
if(idx < d)
{
ai = offset*(2*idx+1)-1;
bi = offset*(2*idx+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
sdata[bi] += sdata[ai];
}
offset *= 2;
}
if(idx == 0){sdata[n-1 + CONFLICT_FREE_OFFSET(n-1)] = 0;}
// Traverse down the tree and build scan
for(int d = 1; d<n;d*=2)
{
offset >>= 1;
__syncthreads();
if(idx < d)
{
ai = offset*(2*idx+1)-1;
bi = offset*(2*idx+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
uint t = sdata[ai];
sdata[ai] = sdata[bi];
sdata[bi] += t;
}
}
__syncthreads();
//Apply result to local series
ai = 2*idx;
bi = 2*idx+1;
for(int i=0;i<4;i++)
{
low[i] += sdata[ai+bankOffsetA];
high[i] += sdata[bi+bankOffsetB];
}
// Write results back to global memory
for(int i=0;i<4;i++)
{
if((8*gidx+i)<nptcls_bin)
{
blockoffsets(8*gidx+i,binid) = low[i];
}
if((8*gidx+4+i)<nptcls_bin)
{
blockoffsets(8*gidx+4+i,binid) = high[i];
}
}
// Store the sum of the block for further processing
if(idx == (blockDim.x-1)){gridoffsets(blockIdx.x,binid) = high[3];}
}
__global__
void bin_scan_back_add(cudaMatrixui blockoffsets,cudaMatrixui gridoffsets,int nptcls_bin)
{
uint idx = threadIdx.x;
uint gidx = blockIdx.x*blockDim.x+idx;
uint bidx = blockIdx.x;
uint binid = blockIdx.y;
uint block_start = 8*blockIdx.x*blockDim.x;
uint thid;
uint offset;
if((bidx > 0)){ offset = gridoffsets(bidx-1,binid);}
if(bidx == 0){offset = 0;}
__syncthreads();
for(int i=0;i<8;i++)
{
thid = block_start+idx+i*blockDim.x;
if(thid<nptcls_bin) blockoffsets(thid,binid) += offset;
}
}
__host__
void bin_scan(cudaMatrixui &blockoffsets,int nptcls_bin,int nbins)
{
// This function takes an input array blockoffsets, and calculates the cumulative sum of all the elements.
// This function can process up to 8*PRESCAN_BLOCK_SIZE elements per block
// This function was tested and verified on 9/30/2011
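// Outline: (1) binning_prescan scans each chunk of 8*PRESCAN_BLOCK_SIZE elements and
// writes the chunk totals to gridoffsets, (2) gridoffsets is scanned by a recursive
// call to bin_scan, (3) bin_scan_back_add adds the scanned chunk totals back onto
// every element of the corresponding chunk.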
int nptcls_block = nptcls_bin/8;
dim3 cudaBlockSize(1,1,1);
dim3 cudaGridSize(1,1,1);
printf("scan_level = %i\n",scan_level);
scan_level++;
cudaBlockSize.x = PRESCAN_BLOCK_SIZE;
cudaGridSize.x = (nptcls_bin+8*PRESCAN_BLOCK_SIZE-1)/(8*PRESCAN_BLOCK_SIZE);
cudaGridSize.y = nbins;
// Allocate space to store the blockwise results
cudaMatrixui gridoffsets(cudaGridSize.x,nbins);
// Scan
CUDA_SAFE_KERNEL((hipLaunchKernelGGL(binning_prescan, dim3(cudaGridSize), dim3(cudaBlockSize), 0, 0,
blockoffsets, gridoffsets, nptcls_bin)));
// If multiple blocks are used then we have to calculate the cumulative sum of the results of each block
if(cudaGridSize.x > 1)
{
// Recurse
bin_scan(gridoffsets,cudaGridSize.x,nbins);
// Add the block totals to each of the elements in the block
CUDA_SAFE_KERNEL((hipLaunchKernelGGL(bin_scan_back_add, dim3(cudaGridSize), dim3(cudaBlockSize), 0, 0,
blockoffsets, gridoffsets, nptcls_bin)));
}
// Free memory
gridoffsets.cudaMatrixFree();
}
| 43ac6d51c97e094e7353f4045541d0e95fb32656.cu | #include <thrust/sort.h>
#include "cudamatrix_types.cuh"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <ctime>
#include <cstring>
#include "cuda.h"
#include "common_functions.h"
#include "sm_20_intrinsics.h"
#include "host_defines.h"
#include <iostream>
#include "cuda_texture_types.h"
#include "texture_fetch_functions.h"
#include "builtin_types.h"
#include "cutil.h"
#include "device_functions.h"
# define CUDA_SAFE_KERNEL(call) { \
call; \
cudaDeviceSynchronize(); \
cudaError err = cudaGetLastError(); \
if ( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} }
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(n) \
((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS)
#endif
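// Padding shared-memory indices by (n >> LOG_NUM_BANKS) spreads accesses across
// different banks so the tree-based scan below avoids shared-memory bank conflicts.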
#define PRESCAN_BLOCK_SIZE 512
int scan_level=0;
__global__
void binning_prescan(cudaMatrixui blockoffsets, cudaMatrixui gridoffsets,int nptcls_bin)
{
uint idx = threadIdx.x;
uint gidx = blockIdx.x*blockDim.x+idx;
uint binid = blockIdx.y;
int offset = 1;
const uint n = 2*PRESCAN_BLOCK_SIZE;
__shared__ uint sdata[2*n];
uint low[4];
uint high[4];
// Load data from global memory
for(int i=0;i<4;i++)
{
if((8*gidx+i)<nptcls_bin)
{
low[i] = blockoffsets(8*gidx+i,binid);
}
else
{
low[i] = 0;
}
if((8*gidx+4+i)<nptcls_bin)
{
high[i] = blockoffsets(8*gidx+4+i,binid);
}
else
{
high[i] = 0;
}
}
// Calculate the sum for 2 sets of 4 elements
for(int i=1;i<4;i++)
{
low[i] += low[i-1];
high[i] += high[i-1];
}
int ai = 2*idx;
int bi = 2*idx+1;
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// Load local results into shared memory
sdata[ai+bankOffsetA] = low[3];
sdata[bi+bankOffsetB] = high[3];
for(int d=n>>1;d>0;d>>=1) // build sum in place up the tree
{
__syncthreads();
if(idx < d)
{
ai = offset*(2*idx+1)-1;
bi = offset*(2*idx+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
sdata[bi] += sdata[ai];
}
offset *= 2;
}
if(idx == 0){sdata[n-1 + CONFLICT_FREE_OFFSET(n-1)] = 0;}
// Traverse down the tree and build scan
for(int d = 1; d<n;d*=2)
{
offset >>= 1;
__syncthreads();
if(idx < d)
{
ai = offset*(2*idx+1)-1;
bi = offset*(2*idx+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
uint t = sdata[ai];
sdata[ai] = sdata[bi];
sdata[bi] += t;
}
}
__syncthreads();
//Apply result to local series
ai = 2*idx;
bi = 2*idx+1;
for(int i=0;i<4;i++)
{
low[i] += sdata[ai+bankOffsetA];
high[i] += sdata[bi+bankOffsetB];
}
// Write results back to global memory
for(int i=0;i<4;i++)
{
if((8*gidx+i)<nptcls_bin)
{
blockoffsets(8*gidx+i,binid) = low[i];
}
if((8*gidx+4+i)<nptcls_bin)
{
blockoffsets(8*gidx+4+i,binid) = high[i];
}
}
// Store the sum of the block for further processing
if(idx == (blockDim.x-1)){gridoffsets(blockIdx.x,binid) = high[3];}
}
__global__
void bin_scan_back_add(cudaMatrixui blockoffsets,cudaMatrixui gridoffsets,int nptcls_bin)
{
uint idx = threadIdx.x;
uint gidx = blockIdx.x*blockDim.x+idx;
uint bidx = blockIdx.x;
uint binid = blockIdx.y;
uint block_start = 8*blockIdx.x*blockDim.x;
uint thid;
uint offset;
if((bidx > 0)){ offset = gridoffsets(bidx-1,binid);}
if(bidx == 0){offset = 0;}
__syncthreads();
for(int i=0;i<8;i++)
{
thid = block_start+idx+i*blockDim.x;
if(thid<nptcls_bin) blockoffsets(thid,binid) += offset;
}
}
__host__
void bin_scan(cudaMatrixui &blockoffsets,int nptcls_bin,int nbins)
{
// This function takes an input array blockoffsets, and calculates the cumulative sum of all the elements.
// This function can process up to 8*PRESCAN_BLOCK_SIZE elements per block
// This function was tested and verified on 9/30/2011
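// Outline: (1) binning_prescan scans each chunk of 8*PRESCAN_BLOCK_SIZE elements and
// writes the chunk totals to gridoffsets, (2) gridoffsets is scanned by a recursive
// call to bin_scan, (3) bin_scan_back_add adds the scanned chunk totals back onto
// every element of the corresponding chunk.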
int nptcls_block = nptcls_bin/8;
dim3 cudaBlockSize(1,1,1);
dim3 cudaGridSize(1,1,1);
printf("scan_level = %i\n",scan_level);
scan_level++;
cudaBlockSize.x = PRESCAN_BLOCK_SIZE;
cudaGridSize.x = (nptcls_bin+8*PRESCAN_BLOCK_SIZE-1)/(8*PRESCAN_BLOCK_SIZE);
cudaGridSize.y = nbins;
// Allocate space to store the blockwise results
cudaMatrixui gridoffsets(cudaGridSize.x,nbins);
// Scan
CUDA_SAFE_KERNEL((binning_prescan<<<cudaGridSize,cudaBlockSize>>>(
blockoffsets,gridoffsets,nptcls_bin)));
// If multiple blocks are used then we have to calculate the cumulative sum of the results of each block
if(cudaGridSize.x > 1)
{
// Recurse
bin_scan(gridoffsets,cudaGridSize.x,nbins);
// Add the block totals to each of the elements in the block
CUDA_SAFE_KERNEL((bin_scan_back_add<<<cudaGridSize,cudaBlockSize>>>(
blockoffsets,gridoffsets,nptcls_bin)));
}
// Free memory
gridoffsets.cudaMatrixFree();
}
|
28f4e2d6dc05fd9f31389fa6f12720a8424b2226.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/hip_runtime_api.h"
#include <stdio.h>
hipError_t multiply8WithCuda(unsigned char* input1, unsigned char* input2, unsigned char* output, int xSize, int ySize);
int verify(unsigned char* input, unsigned char* output, int xSize, int ySize);
int verify(unsigned char* GoldInput, unsigned char* CudaInput, int xSize, int ySize) {
for (int i = 0; i < xSize * ySize; i++) {
if (GoldInput[i] != CudaInput[i]) {
printf("GoldInput[%d] = %d CInput[%d]=%d \n", i, GoldInput[i], i, CudaInput[i]);
return(1);
}
}
return(0);
}
void multiply8WithC(unsigned char* in1, unsigned char* in2, unsigned char* out, int x_size, int y_size) {
size_t x, y;
if (y_size != x_size) throw "Error! Multiplication unavailable";
for (y = 0; y < y_size; y += 1) {
for (x = 0; x < x_size; x += 1) {
size_t pos = y * x_size + x;
for (size_t i = 0; i < x_size; i += 1) {
*(out + pos) += *(in1 + y * x_size + i) * *(in2 + i * x_size + x);
}
}
}
}
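// Worked example (illustrative sketch, not in the original source): for 2x2 inputs
// in1 = [1 2; 3 4] and in2 = [5 6; 7 8], the accumulation above yields
// out[0][0] = 1*5 + 2*7 = 19, out[0][1] = 1*6 + 2*8 = 22,
// out[1][0] = 3*5 + 4*7 = 43, out[1][1] = 3*6 + 4*8 = 50;
// larger products would wrap modulo 256 because the buffers are unsigned char.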
__global__ void kernelMultiply8(unsigned char* in1, unsigned char* in2, unsigned char* out, int size) {
int xLoc = threadIdx.x;
int xWidth = blockDim.x;
int it = blockIdx.x;
int yLoc = blockIdx.y;
int index = xLoc + yLoc * xWidth;
if (index < size) {
// I think I can improve this one. This will be improved and included in next homework handin.
size_t pos = yLoc * xWidth + xLoc;
for (size_t i = 0; i < xWidth; i += 1) {
*(out + pos) += *(in1 + yLoc * xWidth + i) * *(in2 + i * xWidth + xLoc);
}
}
}
int main() {
unsigned char* input1, * input2, * CudaOutput, * GoldOutput;
int xSize, ySize;
xSize = 32;
ySize = 32;
input1 = new unsigned char[xSize * ySize];
input2 = new unsigned char[xSize * ySize];
CudaOutput = new unsigned char[xSize * ySize] { 0 };
GoldOutput = new unsigned char[xSize * ySize] { 0 };
int i, j;
printf("xSize=%d ySize=%d \n", xSize, ySize);
FILE* fp;
//fp = fopen("barbara_gray.raw", "rb");
//fread(input, xSize, ySize, fp);
for (int i = 0; i < ySize; i++)
for (int j = 0; j < xSize; j++) {
input1[i * xSize + j] = (i * j * j * j) % 255;
input2[i * xSize + j] = (i * j) % 255;
}
multiply8WithC(input1, input2, GoldOutput, xSize, ySize);
// Multiply the matrices in parallel on the GPU.
hipError_t cudaStatus = multiply8WithCuda(input1, input2, CudaOutput, xSize, ySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "multiply8WithCuda failed!");
return 1;
}
int error = verify(GoldOutput, CudaOutput, xSize, ySize);
if (error != 0)
printf("Verify Failed \n");
else
printf("Verify Successful \n");
fp = fopen("COutput.raw", "wb");
fwrite(GoldOutput, xSize, ySize, fp);
fclose(fp);
fp = fopen("CudaOutput.raw", "wb");
fwrite(CudaOutput, xSize, ySize, fp);
fclose(fp);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
delete[] CudaOutput;
delete[] GoldOutput;
delete[] input1;
delete[] input2;
return 0;
}
// Helper function that uses CUDA to multiply the two 8-bit matrices in parallel.
hipError_t multiply8WithCuda(unsigned char* input1, unsigned char* input2, unsigned char* output, int xSize, int ySize) {
unsigned char* dev_input1 = 0;
unsigned char* dev_input2 = 0;
unsigned char* dev_output = 0;
// hipProfilerInitialize();
unsigned int xysize = xSize * ySize;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
hipDeviceProp_t prop;
int count;
dim3 blocks, threads;
threads.x = xSize;
threads.y = 1;
//Launch configuration: one block per output row (blocks.y = ySize) with xSize threads along X, e.g. 32x32 -> 32 blocks of 32 threads
blocks.x = 1;
blocks.y = ySize;
printf("blocks.x = %d blocks.y=%d \n", blocks.x, blocks.y);
printf("threads.x = %d threads.y=%d \n", threads.x, threads.y);
hipGetDeviceCount(&count);
printf("Count = %d\n", count);
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
hipEventRecord(start, 0);
// Allocate GPU buffers for two input .
cudaStatus = hipMalloc((void**)&dev_input1, xysize * sizeof(char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_input2, xysize * sizeof(char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_output, xysize * sizeof(char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_input1, input1, xysize, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_input2, input2, xysize, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
hipProfilerStart();
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( kernelMultiply8), dim3(blocks), dim3(threads), 0, 0, dev_input1, dev_input2, dev_output, xysize);
hipProfilerStop();
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching invert8Kernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(output, dev_output, xysize * sizeof(char), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float cudaElapsedTime;
hipEventElapsedTime(&cudaElapsedTime, start, stop);
printf("Time for execution = %3.1f ms \n", cudaElapsedTime);
hipEventDestroy(start);
hipEventDestroy(stop);
Error:
hipFree(dev_input1);
hipFree(dev_input2);
hipFree(dev_output);
return cudaStatus;
}
| 28f4e2d6dc05fd9f31389fa6f12720a8424b2226.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_profiler_api.h"
#include <stdio.h>
cudaError_t multiply8WithCuda(unsigned char* input1, unsigned char* input2, unsigned char* output, int xSize, int ySize);
int verify(unsigned char* input, unsigned char* output, int xSize, int ySize);
int verify(unsigned char* GoldInput, unsigned char* CudaInput, int xSize, int ySize) {
for (int i = 0; i < xSize * ySize; i++) {
if (GoldInput[i] != CudaInput[i]) {
printf("GoldInput[%d] = %d CInput[%d]=%d \n", i, GoldInput[i], i, CudaInput[i]);
return(1);
}
}
return(0);
}
void multiply8WithC(unsigned char* in1, unsigned char* in2, unsigned char* out, int x_size, int y_size) {
size_t x, y;
if (y_size != x_size) throw "Error! Multiplication unavailable";
for (y = 0; y < y_size; y += 1) {
for (x = 0; x < x_size; x += 1) {
size_t pos = y * x_size + x;
for (size_t i = 0; i < x_size; i += 1) {
*(out + pos) += *(in1 + y * x_size + i) * *(in2 + i * x_size + x);
}
}
}
}
__global__ void kernelMultiply8(unsigned char* in1, unsigned char* in2, unsigned char* out, int size) {
int xLoc = threadIdx.x;
int xWidth = blockDim.x;
int it = blockIdx.x;
int yLoc = blockIdx.y;
int index = xLoc + yLoc * xWidth;
if (index < size) {
// I think I can improve this one. This will be improved and included in next homework handin.
size_t pos = yLoc * xWidth + xLoc;
for (size_t i = 0; i < xWidth; i += 1) {
*(out + pos) += *(in1 + yLoc * xWidth + i) * *(in2 + i * xWidth + xLoc);
}
}
}
int main() {
unsigned char* input1, * input2, * CudaOutput, * GoldOutput;
int xSize, ySize;
xSize = 32;
ySize = 32;
input1 = new unsigned char[xSize * ySize];
input2 = new unsigned char[xSize * ySize];
CudaOutput = new unsigned char[xSize * ySize] { 0 };
GoldOutput = new unsigned char[xSize * ySize] { 0 };
int i, j;
printf("xSize=%d ySize=%d \n", xSize, ySize);
FILE* fp;
//fp = fopen("barbara_gray.raw", "rb");
//fread(input, xSize, ySize, fp);
for (int i = 0; i < ySize; i++)
for (int j = 0; j < xSize; j++) {
input1[i * xSize + j] = (i * j * j * j) % 255;
input2[i * xSize + j] = (i * j) % 255;
}
multiply8WithC(input1, input2, GoldOutput, xSize, ySize);
// Multiply the matrices in parallel on the GPU.
cudaError_t cudaStatus = multiply8WithCuda(input1, input2, CudaOutput, xSize, ySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "multiply8WithCuda failed!");
return 1;
}
int error = verify(GoldOutput, CudaOutput, xSize, ySize);
if (error != 0)
printf("Verify Failed \n");
else
printf("Verify Successful \n");
fp = fopen("COutput.raw", "wb");
fwrite(GoldOutput, xSize, ySize, fp);
fclose(fp);
fp = fopen("CudaOutput.raw", "wb");
fwrite(CudaOutput, xSize, ySize, fp);
fclose(fp);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
delete[] CudaOutput;
delete[] GoldOutput;
delete[] input1;
delete[] input2;
return 0;
}
// Helper function that uses CUDA to multiply the two 8-bit matrices in parallel.
cudaError_t multiply8WithCuda(unsigned char* input1, unsigned char* input2, unsigned char* output, int xSize, int ySize) {
unsigned char* dev_input1 = 0;
unsigned char* dev_input2 = 0;
unsigned char* dev_output = 0;
// cudaProfilerInitialize();
unsigned int xysize = xSize * ySize;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaDeviceProp prop;
int count;
dim3 blocks, threads;
threads.x = xSize;
threads.y = 1;
//Launch configuration: one block per output row (blocks.y = ySize) with xSize threads along X, e.g. 32x32 -> 32 blocks of 32 threads
blocks.x = 1;
blocks.y = ySize;
printf("blocks.x = %d blocks.y=%d \n", blocks.x, blocks.y);
printf("threads.x = %d threads.y=%d \n", threads.x, threads.y);
cudaGetDeviceCount(&count);
printf("Count = %d\n", count);
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaEventRecord(start, 0);
// Allocate GPU buffers for two input .
cudaStatus = cudaMalloc((void**)&dev_input1, xysize * sizeof(char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_input2, xysize * sizeof(char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_output, xysize * sizeof(char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_input1, input1, xysize, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_input2, input2, xysize, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaProfilerStart();
// Launch a kernel on the GPU with one thread for each element.
kernelMultiply8<<<blocks, threads>>>(dev_input1, dev_input2, dev_output, xysize);
cudaProfilerStop();
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching invert8Kernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(output, dev_output, xysize * sizeof(char), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float cudaElapsedTime;
cudaEventElapsedTime(&cudaElapsedTime, start, stop);
printf("Time for execution = %3.1f ms \n", cudaElapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
Error:
cudaFree(dev_input1);
cudaFree(dev_input2);
cudaFree(dev_output);
return cudaStatus;
}
|
bdf1d65d47b56a0d5023b5b7ca1bc4c43c0d8e48.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cmath>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <cmath>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define TOTAL_THREADS 1024
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
inline int opt_n_threads(int work_size) {
const int pow_2 = ::log(static_cast<double>(work_size)) / ::log(2.0);
return max(min(1 << pow_2, TOTAL_THREADS), 1);
}
#define CUDA_CHECK_ERRORS() \
do { \
hipError_t err = hipGetLastError(); \
if (hipSuccess != err) { \
fprintf(stderr, "CUDA kernel failed : %s\n%s at L:%d in %s\n", \
hipGetErrorString(err), __PRETTY_FUNCTION__, __LINE__, \
__FILE__); \
exit(-1); \
} \
} while (0)
inline dim3 opt_block_config(int x, int y) {
const int x_threads = opt_n_threads(x);
const int y_threads =
max(min(opt_n_threads(y), TOTAL_THREADS / x_threads), 1);
dim3 block_config(x_threads, y_threads, 1);
return block_config;
}
// input: points(b, c, n) idx(b, npoints, nsample)
// output: out(b, c, npoints, nsample)
__global__ void group_points_kernel(int b, int c, int n, int npoints,
int nsample,
const float *__restrict__ points,
const int *__restrict__ idx,
float *__restrict__ out) {
int batch_index = blockIdx.x;
points += batch_index * n * c;
idx += batch_index * npoints * nsample;
out += batch_index * npoints * nsample * c;
const int index = threadIdx.y * blockDim.x + threadIdx.x;
const int stride = blockDim.y * blockDim.x;
for (int i = index; i < c * npoints; i += stride) {
const int l = i / npoints;
const int j = i % npoints;
for (int k = 0; k < nsample; ++k) {
int ii = idx[j * nsample + k];
out[(l * npoints + j) * nsample + k] = points[l * n + ii];
}
}
}
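// Illustrative note (not from the original source): the gather above implements
// out[l][j][k] = points[l][ idx[j][k] ] within one batch element. For example, with
// c = 2 channels, n = 5 source points, npoints = 3 and nsample = 2, if idx[1*2+0] = 4
// then the iteration with l = 1, j = 1, k = 0 copies points[1*5 + 4] into
// out[(1*3 + 1)*2 + 0], i.e. channel 1 of source point 4 becomes sample 0 of grouped point 1.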
void group_points_cuda_forward(int b, int c, int n, int npoints, int nsample,
const float *points, const int *idx,float *out) {
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( group_points_kernel), dim3(b), dim3(opt_block_config(npoints, c)), 0, stream,
b, c, n, npoints, nsample, points, idx, out);
CUDA_CHECK_ERRORS();
}
// input: grad_out(b, c, npoints, nsample), idx(b, npoints, nsample)
// output: grad_points(b, c, n)
__global__ void group_points_grad_kernel(int b, int c, int n, int npoints,
int nsample, const float *__restrict__ grad_out, const int *__restrict__ idx,
float *__restrict__ grad_points) {
int batch_index = blockIdx.x;
grad_out += batch_index * npoints * nsample * c;
idx += batch_index * npoints * nsample;
grad_points += batch_index * n * c;
const int index = threadIdx.y * blockDim.x + threadIdx.x;
const int stride = blockDim.y * blockDim.x;
for (int i = index; i < c * npoints; i += stride) {
const int l = i / npoints;
const int j = i % npoints;
for (int k = 0; k < nsample; ++k) {
int ii = idx[j * nsample + k];
atomicAdd(grad_points + l * n + ii,
grad_out[(l * npoints + j) * nsample + k]);
}
}
}
void group_points_cuda_backward(int b, int c, int n, int npoints,
int nsample, const float *grad_out,const int *idx, float *grad_points) {
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( group_points_grad_kernel), dim3(b), dim3(opt_block_config(npoints, c)), 0, stream,
b, c, n, npoints, nsample, grad_out, idx, grad_points);
CUDA_CHECK_ERRORS();
}
| bdf1d65d47b56a0d5023b5b7ca1bc4c43c0d8e48.cu | #include <stdio.h>
#include <stdlib.h>
#include <cmath>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <cmath>
#include <cuda.h>
#include <cuda_runtime.h>
#define TOTAL_THREADS 1024
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
inline int opt_n_threads(int work_size) {
const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);
return max(min(1 << pow_2, TOTAL_THREADS), 1);
}
#define CUDA_CHECK_ERRORS() \
do { \
cudaError_t err = cudaGetLastError(); \
if (cudaSuccess != err) { \
fprintf(stderr, "CUDA kernel failed : %s\n%s at L:%d in %s\n", \
cudaGetErrorString(err), __PRETTY_FUNCTION__, __LINE__, \
__FILE__); \
exit(-1); \
} \
} while (0)
inline dim3 opt_block_config(int x, int y) {
const int x_threads = opt_n_threads(x);
const int y_threads =
max(min(opt_n_threads(y), TOTAL_THREADS / x_threads), 1);
dim3 block_config(x_threads, y_threads, 1);
return block_config;
}
// input: points(b, c, n) idx(b, npoints, nsample)
// output: out(b, c, npoints, nsample)
__global__ void group_points_kernel(int b, int c, int n, int npoints,
int nsample,
const float *__restrict__ points,
const int *__restrict__ idx,
float *__restrict__ out) {
int batch_index = blockIdx.x;
points += batch_index * n * c;
idx += batch_index * npoints * nsample;
out += batch_index * npoints * nsample * c;
const int index = threadIdx.y * blockDim.x + threadIdx.x;
const int stride = blockDim.y * blockDim.x;
for (int i = index; i < c * npoints; i += stride) {
const int l = i / npoints;
const int j = i % npoints;
for (int k = 0; k < nsample; ++k) {
int ii = idx[j * nsample + k];
out[(l * npoints + j) * nsample + k] = points[l * n + ii];
}
}
}
void group_points_cuda_forward(int b, int c, int n, int npoints, int nsample,
const float *points, const int *idx,float *out) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
group_points_kernel<<<b, opt_block_config(npoints, c), 0, stream>>>(
b, c, n, npoints, nsample, points, idx, out);
CUDA_CHECK_ERRORS();
}
// input: grad_out(b, c, npoints, nsample), idx(b, npoints, nsample)
// output: grad_points(b, c, n)
__global__ void group_points_grad_kernel(int b, int c, int n, int npoints,
int nsample, const float *__restrict__ grad_out, const int *__restrict__ idx,
float *__restrict__ grad_points) {
int batch_index = blockIdx.x;
grad_out += batch_index * npoints * nsample * c;
idx += batch_index * npoints * nsample;
grad_points += batch_index * n * c;
const int index = threadIdx.y * blockDim.x + threadIdx.x;
const int stride = blockDim.y * blockDim.x;
for (int i = index; i < c * npoints; i += stride) {
const int l = i / npoints;
const int j = i % npoints;
for (int k = 0; k < nsample; ++k) {
int ii = idx[j * nsample + k];
atomicAdd(grad_points + l * n + ii,
grad_out[(l * npoints + j) * nsample + k]);
}
}
}
void group_points_cuda_backward(int b, int c, int n, int npoints,
int nsample, const float *grad_out,const int *idx, float *grad_points) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
group_points_grad_kernel<<<b, opt_block_config(npoints, c), 0, stream>>>(
b, c, n, npoints, nsample, grad_out, idx, grad_points);
CUDA_CHECK_ERRORS();
}
|
c23abd23bdf5bc2dfac1f61ad56382e4e4ad8722.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "matrix_lib.h"
#include <immintrin.h>
#include <pthread.h>
#include <hip/hip_runtime.h>
int threads_per_block = 256;
int max_blocks_per_grid = 4096;
//Threads per block = tpb, mbpg = max blocks per grid
int set_grid_size(int tpb, int mbpg){
if(tpb > 1024 || mbpg > 65535){
return 0;
}
else{
threads_per_block = tpb;
max_blocks_per_grid = mbpg;
return 1;
}
}
//scalar multi gpu
__global__
void escalar(int n, float *d_x, float escalar)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
d_x[i] = d_x[i] * escalar;
}
}
//matrix matrix multi
__global__
void matrix_mult(int n, float *d_A, float *d_B, float *d_C, int widthA, int widthB, int widthC)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
//row of C = int(index/C.width)
//row of A = (row of C)*A.width
//column of C = index%d_C->width
int cLine = i/widthC;
int aLine = cLine*widthA;
int colC = i%widthC;
//Zero the C entry
d_C[i] = 0.0;
//Iterate over the row of A, accumulating the result into C
int k = 0;
for(int j = aLine; j < aLine + widthA; j++){
d_C[i] += d_A[j] * d_B[k*widthB + colC];
k++;
}
}
}
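// Worked index example (illustrative, not in the original source): with widthA = 3 and
// widthB = widthC = 4, output index i = 6 gives cLine = 6/4 = 1, colC = 6%4 = 2 and
// aLine = 1*3 = 3, so the loop sums d_A[3]*d_B[2] + d_A[4]*d_B[6] + d_A[5]*d_B[10],
// i.e. row 1 of A dotted with column 2 of B.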
void preenche_matrix(Matrix *matrix, float val){
unsigned long int m = matrix->height, n = matrix->width;
int i=0;
for(i=0;i<m*n; i++){
matrix->h_rows[i] = val;
}
}
int scalar_matrix_mult(float scalar_value, Matrix* matrix){
unsigned long int m = matrix->height, n = matrix->width;
if((m%8!=0)||(n%8!=0)){
return 0;
}
int blockSize = threads_per_block;
int numBlocks = (m*n + blockSize - 1) / blockSize;
if (numBlocks > max_blocks_per_grid) numBlocks = max_blocks_per_grid;
hipLaunchKernelGGL(( escalar), dim3(numBlocks), dim3(blockSize), 0, 0, m*n, matrix->d_rows, scalar_value);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
return 1;
};
int matrix_matrix_mult(Matrix *matrixA, Matrix *matrixB, Matrix *matrixC){
unsigned long int m = matrixA->height, q = matrixB->width, n = matrixA->width;
if((m%8!=0)||(q%8!=0)||(n%8!=0)||(matrixA->width!=matrixB->height)){
return 0;
}
int blockSize = threads_per_block;
int numBlocks = (matrixC->width*matrixC->height + blockSize - 1) / blockSize;
if (numBlocks > max_blocks_per_grid) numBlocks = max_blocks_per_grid;
hipLaunchKernelGGL(( matrix_mult), dim3(numBlocks), dim3(blockSize), 0, 0, matrixC->width*matrixC->height, matrixA->d_rows, matrixB->d_rows, matrixC->d_rows, matrixA->width, matrixB->width, matrixC->width);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
return 1;
}
void mostra_matrix(Matrix *matrix){
unsigned long int m = matrix->height, n = matrix->width;
int i=0,j=0;
printf("[ ");
for(i=0;i<m; i++){
for(j=0; j<n; j++){
printf(" %f ",matrix->h_rows[i*n + j]);
}
printf("\n");
}
printf("]\n");
}
| c23abd23bdf5bc2dfac1f61ad56382e4e4ad8722.cu | #include <stdio.h>
#include <stdlib.h>
#include "matrix_lib.h"
#include <immintrin.h>
#include <pthread.h>
#include <cuda_runtime.h>
int threads_per_block = 256;
int max_blocks_per_grid = 4096;
//Threads per block = tpb, mbpg = max blocks per grid
int set_grid_size(int tpb, int mbpg){
if(tpb > 1024 || mbpg > 65535){
return 0;
}
else{
threads_per_block = tpb;
max_blocks_per_grid = mbpg;
return 1;
}
}
//scalar multi gpu
__global__
void escalar(int n, float *d_x, float escalar)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
d_x[i] = d_x[i] * escalar;
}
}
//matrix matrix multi
__global__
void matrix_mult(int n, float *d_A, float *d_B, float *d_C, int widthA, int widthB, int widthC)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
//row of C = int(index/C.width)
//row of A = (row of C)*A.width
//column of C = index%d_C->width
int cLine = i/widthC;
int aLine = cLine*widthA;
int colC = i%widthC;
//Zero the C entry
d_C[i] = 0.0;
//Iterate over the row of A, accumulating the result into C
int k = 0;
for(int j = aLine; j < aLine + widthA; j++){
d_C[i] += d_A[j] * d_B[k*widthB + colC];
k++;
}
}
}
void preenche_matrix(Matrix *matrix, float val){
unsigned long int m = matrix->height, n = matrix->width;
int i=0;
for(i=0;i<m*n; i++){
matrix->h_rows[i] = val;
}
}
int scalar_matrix_mult(float scalar_value, Matrix* matrix){
unsigned long int m = matrix->height, n = matrix->width;
if((m%8!=0)||(n%8!=0)){
return 0;
}
int blockSize = threads_per_block;
int numBlocks = (m*n + blockSize - 1) / blockSize;
if (numBlocks > max_blocks_per_grid) numBlocks = max_blocks_per_grid;
escalar<<<numBlocks, blockSize>>>(m*n, matrix->d_rows, scalar_value);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
return 1;
};
int matrix_matrix_mult(Matrix *matrixA, Matrix *matrixB, Matrix *matrixC){
unsigned long int m = matrixA->height, q = matrixB->width, n = matrixA->width;
if((m%8!=0)||(q%8!=0)||(n%8!=0)||(matrixA->width!=matrixB->height)){
return 0;
}
int blockSize = threads_per_block;
int numBlocks = (matrixC->width*matrixC->height + blockSize - 1) / blockSize;
if (numBlocks > max_blocks_per_grid) numBlocks = max_blocks_per_grid;
matrix_mult<<<numBlocks, blockSize>>>(matrixC->width*matrixC->height, matrixA->d_rows, matrixB->d_rows, matrixC->d_rows, matrixA->width, matrixB->width, matrixC->width);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
return 1;
}
void mostra_matrix(Matrix *matrix){
unsigned long int m = matrix->height, n = matrix->width;
int i=0,j=0;
printf("[ ");
for(i=0;i<m; i++){
for(j=0; j<n; j++){
printf(" %f ",matrix->h_rows[i*n + j]);
}
printf("\n");
}
printf("]\n");
}
|
dafc01f3de0f83eae24c2ce117949d08e3e49580.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <regularizers/l1_regularizer.hpp>
#include <utility>
#include <utils.cuh>
namespace HugeCTR {
namespace {
template <typename T>
void launch_initialize_wgrad_kernel(const float* weight, T* wgrad, int num_elements, int batch_size,
float lambda, int n_sms, hipStream_t stream) {
auto op = [lambda, batch_size] __device__(const float in) {
return (in > 0.0f) ? (lambda / batch_size) : -(lambda / batch_size);
};
hipLaunchKernelGGL(( transform_array), dim3(n_sms * 4), dim3(512), 0, stream, weight, wgrad, num_elements, op);
}
} // namespace
template <typename T>
L1Regularizer<T>::L1Regularizer(const Tensor2<float>& weight_buff, const Tensor2<T>& wgrad_buff,
const int batch_size, const float lambda,
const std::shared_ptr<GPUResource>& gpu_resource)
: Regularizer<T>(weight_buff, wgrad_buff, batch_size, gpu_resource), lambda_(lambda) {}
template <typename T>
void L1Regularizer<T>::do_compute_rterm(const float* weight, float* h_rterm, int num_elements) {
CK_CUBLAS_THROW_(
hipblasSasum(Regularizer<T>::get_gpu().get_cublas_handle(), num_elements, weight, 1, h_rterm));
const float alpha = lambda_ / Regularizer<T>::get_batch_size();
*h_rterm *= alpha;
}
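// Derivation sketch (not part of the original file): the term computed above is
// R(w) = (lambda / batch_size) * sum_i |w_i|, where hipblasSasum supplies sum_i |w_i|
// before the alpha scaling. Its (sub)gradient dR/dw_i = (lambda / batch_size) * sign(w_i)
// is exactly what launch_initialize_wgrad_kernel writes into wgrad: +lambda/batch_size
// for positive weights and -lambda/batch_size otherwise.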
template <typename T>
void L1Regularizer<T>::do_initialize_wgrad(const float* weight, T* wgrad, int num_elements) {
launch_initialize_wgrad_kernel(weight, wgrad, num_elements, Regularizer<T>::get_batch_size(),
lambda_, Regularizer<T>::get_gpu().get_sm_count(),
Regularizer<T>::get_gpu().get_stream());
}
template class L1Regularizer<__half>;
template class L1Regularizer<float>;
} // namespace HugeCTR
| dafc01f3de0f83eae24c2ce117949d08e3e49580.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <regularizers/l1_regularizer.hpp>
#include <utility>
#include <utils.cuh>
namespace HugeCTR {
namespace {
template <typename T>
void launch_initialize_wgrad_kernel(const float* weight, T* wgrad, int num_elements, int batch_size,
float lambda, int n_sms, cudaStream_t stream) {
auto op = [lambda, batch_size] __device__(const float in) {
return (in > 0.0f) ? (lambda / batch_size) : -(lambda / batch_size);
};
transform_array<<<n_sms * 4, 512, 0, stream>>>(weight, wgrad, num_elements, op);
}
} // namespace
template <typename T>
L1Regularizer<T>::L1Regularizer(const Tensor2<float>& weight_buff, const Tensor2<T>& wgrad_buff,
const int batch_size, const float lambda,
const std::shared_ptr<GPUResource>& gpu_resource)
: Regularizer<T>(weight_buff, wgrad_buff, batch_size, gpu_resource), lambda_(lambda) {}
template <typename T>
void L1Regularizer<T>::do_compute_rterm(const float* weight, float* h_rterm, int num_elements) {
CK_CUBLAS_THROW_(
cublasSasum(Regularizer<T>::get_gpu().get_cublas_handle(), num_elements, weight, 1, h_rterm));
const float alpha = lambda_ / Regularizer<T>::get_batch_size();
*h_rterm *= alpha;
}
template <typename T>
void L1Regularizer<T>::do_initialize_wgrad(const float* weight, T* wgrad, int num_elements) {
launch_initialize_wgrad_kernel(weight, wgrad, num_elements, Regularizer<T>::get_batch_size(),
lambda_, Regularizer<T>::get_gpu().get_sm_count(),
Regularizer<T>::get_gpu().get_stream());
}
template class L1Regularizer<__half>;
template class L1Regularizer<float>;
} // namespace HugeCTR
|
6cbbb2220d2af949e209297b6a41103e0881c86f.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "IntegratorHPMCMonoImplicitNewGPU.cuh"
#include "ShapeFacetedEllipsoid.h"
#include "ShapeUnion.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeUnion<ShapeFacetedEllipsoid>
template hipError_t gpu_hpmc_free_volume<ShapeUnion<ShapeFacetedEllipsoid> >(const hpmc_free_volume_args_t &args,
const typename ShapeUnion<ShapeFacetedEllipsoid> ::param_type *d_params);
template hipError_t gpu_hpmc_update<ShapeUnion<ShapeFacetedEllipsoid> >(const hpmc_args_t& args,
const typename ShapeUnion<ShapeFacetedEllipsoid> ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_count_overlaps<ShapeUnion<ShapeFacetedEllipsoid> >(const hpmc_implicit_args_t& args,
const typename ShapeUnion<ShapeFacetedEllipsoid> ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject<ShapeUnion<ShapeFacetedEllipsoid> >(const hpmc_implicit_args_t& args,
const typename ShapeUnion<ShapeFacetedEllipsoid> ::param_type *d_params);
template hipError_t gpu_hpmc_insert_depletants_queue<ShapeUnion<ShapeFacetedEllipsoid> >(const hpmc_implicit_args_new_t& args,
const typename ShapeUnion<ShapeFacetedEllipsoid> ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject_new<ShapeUnion<ShapeFacetedEllipsoid> >(const hpmc_implicit_args_new_t& args,
const typename ShapeUnion<ShapeFacetedEllipsoid> ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
| 6cbbb2220d2af949e209297b6a41103e0881c86f.cu | // Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "IntegratorHPMCMonoImplicitNewGPU.cuh"
#include "ShapeFacetedEllipsoid.h"
#include "ShapeUnion.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeUnion<ShapeFacetedEllipsoid>
template cudaError_t gpu_hpmc_free_volume<ShapeUnion<ShapeFacetedEllipsoid> >(const hpmc_free_volume_args_t &args,
const typename ShapeUnion<ShapeFacetedEllipsoid> ::param_type *d_params);
template cudaError_t gpu_hpmc_update<ShapeUnion<ShapeFacetedEllipsoid> >(const hpmc_args_t& args,
const typename ShapeUnion<ShapeFacetedEllipsoid> ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_count_overlaps<ShapeUnion<ShapeFacetedEllipsoid> >(const hpmc_implicit_args_t& args,
const typename ShapeUnion<ShapeFacetedEllipsoid> ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeUnion<ShapeFacetedEllipsoid> >(const hpmc_implicit_args_t& args,
const typename ShapeUnion<ShapeFacetedEllipsoid> ::param_type *d_params);
template cudaError_t gpu_hpmc_insert_depletants_queue<ShapeUnion<ShapeFacetedEllipsoid> >(const hpmc_implicit_args_new_t& args,
const typename ShapeUnion<ShapeFacetedEllipsoid> ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject_new<ShapeUnion<ShapeFacetedEllipsoid> >(const hpmc_implicit_args_new_t& args,
const typename ShapeUnion<ShapeFacetedEllipsoid> ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
|
76bf36ab59b82cb22b81cbc8fb37728d48301610.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "../include/im2col.cuh"
#include "../include/cudaMatrix.cuh"
#include "../include/convcudamatrix.cuh"
int main(void)
{
float data1[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63};
float filter[] = { 0, 1, 2, 3, 4, 5, 6, 7};
float data2[] = {0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.};
cudaConvMatrix cv1(data1,2,4,4,2);
cudaConvMatrix fl(filter,2,2,2,1);
cudaConvMatrix tgt(data2, 2,4,4,1);
cv1.convolve(fl, true, 1, tgt);
float * result = new float[4*4*2];
tgt.getDeviceData(result);
for (int i = 0; i < 2; i ++) {
for (int m = 0; m < 4; m++) {
for (int n = 0; n < 4; n++ ) {
for (int k = 0; k < 1; k ++) {
std::cout << result[i*4*4*1 + m*4 + n+k] << " ";
}
std::cout << "\n";
}
std::cout << "\n";
}
std::cout << "\n";
}
// float *col = new float[col_width*col_height];
// float *ddata, *dcol;
// hipMalloc((void**)&ddata, height*width*sizeof(float));
// hipMalloc((void**)&dcol, col_height*col_width*sizeof(float));
// hipMemcpy(ddata,&data1,height*width*sizeof(float),hipMemcpyHostToDevice);
// im2col_gpu(ddata,channels,height,width,kern_sz,stride,pad,col_height,col_width,dcol);
// hipMemcpy(col,dcol,col_height*col_width*sizeof(float),hipMemcpyDeviceToHost);
// hipFree(ddata);
// hipFree(dcol);
return 0;
} | 76bf36ab59b82cb22b81cbc8fb37728d48301610.cu | #include <iostream>
#include "../include/im2col.cuh"
#include "../include/cudaMatrix.cuh"
#include "../include/convcudamatrix.cuh"
int main(void)
{
float data1[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63};
float filter[] = { 0, 1, 2, 3, 4, 5, 6, 7};
float data2[] = {0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.};
cudaConvMatrix cv1(data1,2,4,4,2);
cudaConvMatrix fl(filter,2,2,2,1);
cudaConvMatrix tgt(data2, 2,4,4,1);
cv1.convolve(fl, true, 1, tgt);
float * result = new float[4*4*2];
tgt.getDeviceData(result);
for (int i = 0; i < 2; i ++) {
for (int m = 0; m < 4; m++) {
for (int n = 0; n < 4; n++ ) {
for (int k = 0; k < 1; k ++) {
std::cout << result[i*4*4*1 + m*4 + n+k] << " ";
}
std::cout << "\n";
}
std::cout << "\n";
}
std::cout << "\n";
}
// float *col = new float[col_width*col_height];
// float *ddata, *dcol;
// cudaMalloc((void**)&ddata, height*width*sizeof(float));
// cudaMalloc((void**)&dcol, col_height*col_width*sizeof(float));
// cudaMemcpy(ddata,&data1,height*width*sizeof(float),cudaMemcpyHostToDevice);
// im2col_gpu(ddata,channels,height,width,kern_sz,stride,pad,col_height,col_width,dcol);
// cudaMemcpy(col,dcol,col_height*col_width*sizeof(float),cudaMemcpyDeviceToHost);
// cudaFree(ddata);
// cudaFree(dcol);
return 0;
} |
e1be3c5b74ccd0d7bb9ae034a9f56a8a9496c67f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void copyPixelsInSlicesRGB(float *ptrinput0, float *ptrkslices0, int dH, int dW, int kH, int kW, int size1, int size2, int isize1, int isize2, int nInputPlane, int padleft, int padright, int padup, int paddown, int inputstr0, int kslicesstr0, int batchsize)
{
// each block does one pixel of the input image
// each kernel slice is represented by its upper-left coordinates
const int pixi=blockIdx.x;
const int pixj=blockIdx.y*blockDim.y + threadIdx.y;
const int tidx=threadIdx.x;
const int batchindex=blockIdx.z*blockDim.z+threadIdx.z;
int i,j;
int imin, jmin, imax, jmax;
int inputoffset, ksliceoffset;
// step 1 : find which kernel slices contain the values of the pixel
__shared__ int _imin, _jmin[32], _imax, _jmax[32], _inputoffset[32][3], _ksliceoffset[32][3];
if(threadIdx.z==0)
{
imin=(pixi - (kH - 1) + (dH -1))/dH > 0 ? (pixi - (kH - 1) + (dH -1))/dH : 0 ;
jmin=(pixj - (kW - 1) + (dW -1))/dW > 0 ? (pixj - (kW - 1) + (dW -1))/dW : 0 ;
imax= pixi / dH < size1 ? pixi / dH : size1 - 1 ;
jmax= pixj / dW < size2 ? pixj / dW : size2 - 1 ;
if(threadIdx.x==0 && threadIdx.y==0)
{
_imin=imin;
_imax=imax;
}
if(threadIdx.x==0)
{
_jmin[threadIdx.y]=jmin;
_jmax[threadIdx.y]=jmax;
}
inputoffset = inputstr0*blockIdx.z*blockDim.z + ((pixi-padup) * isize2 + (pixj-padleft)) * nInputPlane ;
ksliceoffset= kslicesstr0*blockIdx.z*blockDim.z + ((imin * size2 + jmin) * kH * kW + (pixi - imin * dH) * kW + (pixj - jmin*dW) ) * nInputPlane;
_inputoffset[threadIdx.y][threadIdx.x]=inputoffset;
_ksliceoffset[threadIdx.y][threadIdx.x]=ksliceoffset;
}
__syncthreads();
if(batchindex >= batchsize) return;
if(pixj > isize2 + padleft + padright -1) return;
if(threadIdx.z>0)
{
imin=_imin;
imax=_imax;
jmin=_jmin[threadIdx.y];
jmax=_jmax[threadIdx.y];
inputoffset=_inputoffset[threadIdx.y][threadIdx.x];
ksliceoffset=_ksliceoffset[threadIdx.y][threadIdx.x];
}
// step 2 : move the pointers
// this one goes to where the pixel is at
ptrinput0 += inputoffset+inputstr0*threadIdx.z ;
ptrkslices0 += ksliceoffset+kslicesstr0*threadIdx.z ;
const int stridej = (kH*kW - dW) * nInputPlane;
const int stridei = (size2*kH-dH) * kW *nInputPlane - (jmax-jmin+1) * stridej ;
bool zeropad = pixi<padup || pixi>isize1-1+padup || pixj<padleft || pixj>isize2-1+padleft ;
// read pixel
// load the stuff first...
//for (b=0; b<batchsize; b++)
//{
float * ptrinput = ptrinput0;
float * ptrkslices = ptrkslices0;
float pixvalue;
if (zeropad) {
pixvalue=0;
}
else {
pixvalue=ptrinput[tidx];
}
// write to memory
for(i=imin; i<imax+1; i++) {
for(j=jmin; j<jmax+1; j++) {
if(zeropad)
{
ptrkslices[tidx]=0;
}
else {
ptrkslices[tidx]=pixvalue;
}
ptrkslices += stridej;
}
ptrkslices += stridei;
}
//}
} | e1be3c5b74ccd0d7bb9ae034a9f56a8a9496c67f.cu | #include "includes.h"
__global__ void copyPixelsInSlicesRGB(float *ptrinput0, float *ptrkslices0, int dH, int dW, int kH, int kW, int size1, int size2, int isize1, int isize2, int nInputPlane, int padleft, int padright, int padup, int paddown, int inputstr0, int kslicesstr0, int batchsize)
{
// each block does one pixel of the input image
// each kernel slice is represented by its upper-left coordinates
const int pixi=blockIdx.x;
const int pixj=blockIdx.y*blockDim.y + threadIdx.y;
const int tidx=threadIdx.x;
const int batchindex=blockIdx.z*blockDim.z+threadIdx.z;
int i,j;
int imin, jmin, imax, jmax;
int inputoffset, ksliceoffset;
// step 1 : find which kernel slices contain the values of the pixel
__shared__ int _imin, _jmin[32], _imax, _jmax[32], _inputoffset[32][3], _ksliceoffset[32][3];
if(threadIdx.z==0)
{
imin=(pixi - (kH - 1) + (dH -1))/dH > 0 ? (pixi - (kH - 1) + (dH -1))/dH : 0 ;
jmin=(pixj - (kW - 1) + (dW -1))/dW > 0 ? (pixj - (kW - 1) + (dW -1))/dW : 0 ;
imax= pixi / dH < size1 ? pixi / dH : size1 - 1 ;
jmax= pixj / dW < size2 ? pixj / dW : size2 - 1 ;
if(threadIdx.x==0 && threadIdx.y==0)
{
_imin=imin;
_imax=imax;
}
if(threadIdx.x==0)
{
_jmin[threadIdx.y]=jmin;
_jmax[threadIdx.y]=jmax;
}
inputoffset = inputstr0*blockIdx.z*blockDim.z + ((pixi-padup) * isize2 + (pixj-padleft)) * nInputPlane ;
ksliceoffset= kslicesstr0*blockIdx.z*blockDim.z + ((imin * size2 + jmin) * kH * kW + (pixi - imin * dH) * kW + (pixj - jmin*dW) ) * nInputPlane;
_inputoffset[threadIdx.y][threadIdx.x]=inputoffset;
_ksliceoffset[threadIdx.y][threadIdx.x]=ksliceoffset;
}
__syncthreads();
if(batchindex >= batchsize) return;
if(pixj > isize2 + padleft + padright -1) return;
if(threadIdx.z>0)
{
imin=_imin;
imax=_imax;
jmin=_jmin[threadIdx.y];
jmax=_jmax[threadIdx.y];
inputoffset=_inputoffset[threadIdx.y][threadIdx.x];
ksliceoffset=_ksliceoffset[threadIdx.y][threadIdx.x];
}
// step 2 : move the pointers
// this one goes to where the pixel is at
ptrinput0 += inputoffset+inputstr0*threadIdx.z ;
ptrkslices0 += ksliceoffset+kslicesstr0*threadIdx.z ;
const int stridej = (kH*kW - dW) * nInputPlane;
const int stridei = (size2*kH-dH) * kW *nInputPlane - (jmax-jmin+1) * stridej ;
bool zeropad = pixi<padup || pixi>isize1-1+padup || pixj<padleft || pixj>isize2-1+padleft ;
// read pixel
// load the stuff first...
//for (b=0; b<batchsize; b++)
//{
float * ptrinput = ptrinput0;
float * ptrkslices = ptrkslices0;
float pixvalue;
if (zeropad) {
pixvalue=0;
}
else {
pixvalue=ptrinput[tidx];
}
// write to memory
for(i=imin; i<imax+1; i++) {
for(j=jmin; j<jmax+1; j++) {
if(zeropad)
{
ptrkslices[tidx]=0;
}
else {
ptrkslices[tidx]=pixvalue;
}
ptrkslices += stridej;
}
ptrkslices += stridei;
}
//}
} |
35e58e25088fa873920b4f9f899eaf8b8995bf3a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <device_launch_parameters.h>
#include "caffe/common.hpp"
#include "caffe/util/gpu_math_functions.cuh"
#include "caffe/util/math_functions.hpp"
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <thrust/extrema.h>
#include <thrust/pair.h>
#include <thrust/count.h>
namespace caffe {
template<typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template<>
void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_mul<float16>(const int N, const float16* a, const float16* b, float16* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<typename Dtype>
__global__ void square_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * a[index];
}
}
template<>
void caffe_gpu_square<float>(const int N, const float* a, float* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( square_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_square<double>(const int N, const double* a, double* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( square_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_square<float16>(const int N, const float16* a, float16* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( square_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template<>
void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_div<float16>(const int N, const float16* a, const float16* b, float16* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template<>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_abs<float16>(const int N, const float16* a, float16* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template<>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>) , dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_exp<float16>(const int N, const float16* a, float16* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template<>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, y);
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, y);
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_log<float16>(const int N, const float16* a, float16* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, y);
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template<>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, alpha, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, alpha, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_powx<float16>(const int N, const float16* a,
const float16 alpha, float16* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, alpha, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC_AUX(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
__global__ void popc_kernel(const int n, const float* a,
const float* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popc(static_cast<uint32_t>(a[index]) ^
static_cast<uint32_t>(b[index]));
}
}
__global__ void popcll_kernel(const int n, const double* a,
const double* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popcll(static_cast<uint64_t>(a[index]) ^
static_cast<uint64_t>(b[index]));
}
}
__global__ void popch_kernel(const int n, const half* a,
const half* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popc(a[index].x() ^ b[index].x());
}
}
template<typename T, typename TR>
__global__
void convert_kernel(const unsigned int n, const T* in, TR* out) {
CUDA_KERNEL_LOOP(i, n) {
out[i] = in[i];
}
}
template<>
__global__
void convert_kernel(const unsigned int n, const half2* in, float2* out) {
CUDA_KERNEL_LOOP(i, n) {
out[i] = __half22float2(in[i]);
}
}
template<>
__global__
void convert_kernel(const unsigned int n, const float2* in, half2* out) {
CUDA_KERNEL_LOOP(i, n) {
out[i] = float22half2_clip(in[i]);
}
}
template<typename T, typename TR>
void caffe_gpu_convert(const unsigned int N, const T* in, TR* out) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( convert_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, in, out);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_convert<float, float16>(const unsigned int n,
const float* in, float16* out) {
hipStream_t stream = Caffe::thread_stream();
const unsigned int n2 = even(n) / 2;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( convert_kernel), dim3(CAFFE_GET_BLOCKS_HALF(n2)), dim3(CAFFE_CUDA_NUM_THREADS_HALF), 0, stream,
n2, reinterpret_cast<const float2*>(in), reinterpret_cast<half2*>(out));
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_convert<float16, float>(const unsigned int n,
const float16* in, float* out) {
hipStream_t stream = Caffe::thread_stream();
const unsigned int n2 = even(n) / 2;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( convert_kernel), dim3(CAFFE_GET_BLOCKS_HALF(n2)), dim3(CAFFE_CUDA_NUM_THREADS_HALF), 0, stream,
n2, reinterpret_cast<const half2*>(in), reinterpret_cast<float2*>(out));
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
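// Illustrative note (assumes even(n) rounds n up to the nearest even count, which is not
// shown in this file): the two float16 specializations above convert elements in packed
// pairs. For n = 10, n2 = 10/2 = 5, so five threads each convert one half2 <-> float2 pair;
// for n = 11 the rounded count 12 gives n2 = 6, and the last pair touches one slot of
// padding beyond the logical end, so the buffers are expected to be padded to an even size.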
template void caffe_gpu_convert<double, float16>(const unsigned int n,
const double* in, float16* out);
template void caffe_gpu_convert<float16, double>(const unsigned int n,
const float16* in, double* out);
template void caffe_gpu_convert<double, float>(const unsigned int n,
const double* in, float* out);
template void caffe_gpu_convert<float, double>(const unsigned int n,
const float* in, double* out);
template<>
void caffe_gpu_convert<float, float>(const unsigned int n,
const float* in, float* out) {
caffe_copy(n, in, out);
}
template<>
void caffe_gpu_convert<double, double>(const unsigned int n,
const double* in, double* out) {
caffe_copy(n, in, out);
}
#ifndef CPU_ONLY
template<>
void caffe_gpu_convert<float16, float16>(const unsigned int n,
const float16* in, float16* out) {
caffe_copy(n, in, out);
}
#endif
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
template<>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template<>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template<>
void caffe_gpu_rng_uniform<float16>(const int n, const float16 a,
const float16 b, float16* r) {
GPUMemory::Workspace rf(n * sizeof(float));
float* rfp = static_cast<float*>(rf.data());
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), rfp, n));
const float range = b - a;
if (range != 1.F) {
caffe_gpu_scal(n, range, rfp);
}
if (a != static_cast<float16>(0)) {
caffe_gpu_add_scalar(n, static_cast<float>(a), rfp);
}
caffe_gpu_convert(n, rfp, r);
}
template<>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) {
CURAND_CHECK(hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template<>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) {
CURAND_CHECK(hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
template<>
void caffe_gpu_rng_gaussian(const int n, const float16 mu, const float16 sigma, float16* r) {
GPUMemory::Workspace rf(n * sizeof(float));
float* rfp = static_cast<float*>(rf.data());
CURAND_CHECK(hiprandGenerateNormal(Caffe::curand_generator(), rfp, n, mu, sigma));
caffe_gpu_convert(n, rfp, r);
}
template<typename Dtype>
__global__ void caffe_gpu_eltwise_max_kernel(const int N, const Dtype alpha, const Dtype* x,
const Dtype beta, Dtype* y) {
CUDA_KERNEL_LOOP(index, N) {
y[index] = max(alpha * x[index], beta * y[index]);
}
}
template<>
void caffe_gpu_eltwise_max<float>(const int N, const float alpha, const float* x,
const float beta, float* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( caffe_gpu_eltwise_max_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
N, alpha, x, beta, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_eltwise_max<double>(const int N,
const double alpha, const double* x, const double beta, double* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( caffe_gpu_eltwise_max_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
N, alpha, x, beta, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
#ifndef CPU_ONLY
template<>
void caffe_gpu_eltwise_max<float16>(const int N,
const float16 alpha, const float16* x, const float16 beta, float16* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( caffe_gpu_eltwise_max_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
N, alpha, x, beta, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
#endif
template<typename Dtype>
__global__ void caffe_gpu_eltwise_min_kernel(const int N,
const Dtype alpha, const Dtype* x, const Dtype beta, Dtype* y) {
CUDA_KERNEL_LOOP(index, N) {
y[index] = min(alpha * x[index], beta * y[index]);
}
}
template<>
void caffe_gpu_eltwise_min<float>(const int N,
const float alpha, const float* x, const float beta, float* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( caffe_gpu_eltwise_min_kernel<float>) , dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, alpha, x, beta, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_eltwise_min<double>(const int N,
const double alpha, const double* x, const double beta, double* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( caffe_gpu_eltwise_min_kernel<double>) , dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, alpha, x, beta, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
#ifndef CPU_ONLY
template<>
void caffe_gpu_eltwise_min<float16>(const int N,
const float16 alpha, const float16* x, const float16 beta, float16* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( caffe_gpu_eltwise_min_kernel<float16>) , dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, alpha, x, beta, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
#endif
//Already defined above
//DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0)));
//DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(if_zero, y[index] = ((x[index] <= Dtype(0) && x[index] >= Dtype(-0) ) ? 1 : 0) );
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(if_nonzero, y[index] = ((x[index] > Dtype(0) || x[index] < Dtype(-0) ) ? 1 : 0) )
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(eltwise_multi, y[index] = y[index]*x[index] )
DEFINE_AND_INSTANTIATE_GPU_1NARY_FUNC(set, x[index] = value, value )
DEFINE_AND_INSTANTIATE_GPU_X2NARY_FUNC(zerout, y[index] = ((x[index] <= Dtype(threshold) && x[index] >= Dtype(-threshold)) ? Dtype(0) : x[index]), threshold )
template <typename Dtype>
struct CheckZeroFunctor {
CheckZeroFunctor(const Dtype threshold) : threshold(threshold) {
}
__host__ __device__ bool operator()(const Dtype& x) {
return (x<=(threshold) && x>=(-threshold));
}
const Dtype threshold;
};
//float16 has too small a range to hold a count, so always use an int return type.
template <typename Dtype>
int caffe_gpu_count_zero(const int N, const Dtype* x, Dtype threshold) {
CheckZeroFunctor<Dtype> check_zero(threshold);
thrust::device_ptr<const Dtype> pWrapper(x);
int count = thrust::count_if(pWrapper, pWrapper+N, check_zero);
return count;
}
template
int caffe_gpu_count_zero<float>(const int N, const float* x, float threshold);
template
int caffe_gpu_count_zero<double>(const int N, const double* x, double threshold);
template
int caffe_gpu_count_zero<float16>(const int N, const float16* x, float16 threshold);
struct transform_op_float16_to_float {
__host__ __device__ float operator()(const float16 v1) {
return float(v1);
}
};
template <typename Dtype>
Dtype caffe_gpu_min(const int N, const Dtype* x) {
thrust::device_ptr<const Dtype> pWrapper(x);
const thrust::device_ptr<const Dtype> min_val = thrust::min_element(pWrapper, pWrapper+N);
return *min_val;
}
template
float caffe_gpu_min(const int N, const float* x);
template
double caffe_gpu_min(const int N, const double* x);
//Avoid float16 errors, probably because thrust::greater() and thrust::less()
//are not available for float16: provide a different specialization here
template<>
float16 caffe_gpu_min(const int N, const float16* x){
thrust::device_ptr<const float16> pWrapper(x);
float min_val = thrust::transform_reduce(pWrapper, pWrapper+N, transform_op_float16_to_float(),
float(std::numeric_limits<float>::max()), thrust::less<float>());
return min_val;
}
template <typename Dtype>
Dtype caffe_gpu_max(const int N, const Dtype* x) {
thrust::device_ptr<const Dtype> pWrapper(x);
const thrust::device_ptr<const Dtype> max_val = thrust::max_element(pWrapper, pWrapper+N);
return *max_val;
}
template
float caffe_gpu_max(const int N, const float* x);
template
double caffe_gpu_max(const int N, const double* x);
//Avoid float16 errors, probably because thrust::greater() and thrust::less()
//are not available for float16: provide a different specialization here
template<>
float16 caffe_gpu_max(const int N, const float16* x) {
thrust::device_ptr<const float16> pWrapper(x);
float max_val = thrust::transform_reduce(pWrapper, pWrapper+N, transform_op_float16_to_float(),
float(std::numeric_limits<float>::lowest()), thrust::greater<float>());
return max_val;
}
} // namespace caffe
| 35e58e25088fa873920b4f9f899eaf8b8995bf3a.cu | #include <algorithm>
#include <device_launch_parameters.h>
#include "caffe/common.hpp"
#include "caffe/util/gpu_math_functions.cuh"
#include "caffe/util/math_functions.hpp"
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <thrust/extrema.h>
#include <thrust/pair.h>
#include <thrust/count.h>
namespace caffe {
template<typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template<>
void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_mul<float16>(const int N, const float16* a, const float16* b, float16* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<typename Dtype>
__global__ void square_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * a[index];
}
}
template<>
void caffe_gpu_square<float>(const int N, const float* a, float* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
square_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(N, a, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_square<double>(const int N, const double* a, double* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
square_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(N, a, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_square<float16>(const int N, const float16* a, float16* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
square_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N, a, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template<>
void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_div<float16>(const int N, const float16* a, const float16* b, float16* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template<>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(N, a, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N, a, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_abs<float16>(const int N, const float16* a, float16* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N, a, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template<>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(N, a, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double> <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N, a, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_exp<float16>(const int N, const float16* a, float16* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N, a, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template<>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(N, a, y);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N, a, y);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_log<float16>(const int N, const float16* a, float16* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N, a, y);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template<>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N, a, alpha, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N, a, alpha, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_powx<float16>(const int N, const float16* a,
const float16 alpha, float16* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N, a, alpha, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC_AUX(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
__global__ void popc_kernel(const int n, const float* a,
const float* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popc(static_cast<uint32_t>(a[index]) ^
static_cast<uint32_t>(b[index]));
}
}
__global__ void popcll_kernel(const int n, const double* a,
const double* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popcll(static_cast<uint64_t>(a[index]) ^
static_cast<uint64_t>(b[index]));
}
}
__global__ void popch_kernel(const int n, const half* a,
const half* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popc(a[index].x() ^ b[index].x());
}
}
template<typename T, typename TR>
__global__
void convert_kernel(const unsigned int n, const T* in, TR* out) {
CUDA_KERNEL_LOOP(i, n) {
out[i] = in[i];
}
}
template<>
__global__
void convert_kernel(const unsigned int n, const half2* in, float2* out) {
CUDA_KERNEL_LOOP(i, n) {
out[i] = __half22float2(in[i]);
}
}
template<>
__global__
void convert_kernel(const unsigned int n, const float2* in, half2* out) {
CUDA_KERNEL_LOOP(i, n) {
out[i] = float22half2_clip(in[i]);
}
}
template<typename T, typename TR>
void caffe_gpu_convert(const unsigned int N, const T* in, TR* out) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
convert_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(N, in, out);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
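// Vectorized specializations below: two values are converted per thread via the
// packed float2/half2 types; even(n) adjusts the element count to an even number
// before it is halved for the kernel launch.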
template<>
void caffe_gpu_convert<float, float16>(const unsigned int n,
const float* in, float16* out) {
cudaStream_t stream = Caffe::thread_stream();
const unsigned int n2 = even(n) / 2;
// NOLINT_NEXT_LINE(whitespace/operators)
convert_kernel<<<CAFFE_GET_BLOCKS_HALF(n2), CAFFE_CUDA_NUM_THREADS_HALF, 0, stream>>>
(n2, reinterpret_cast<const float2*>(in), reinterpret_cast<half2*>(out));
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_convert<float16, float>(const unsigned int n,
const float16* in, float* out) {
cudaStream_t stream = Caffe::thread_stream();
const unsigned int n2 = even(n) / 2;
// NOLINT_NEXT_LINE(whitespace/operators)
convert_kernel<<<CAFFE_GET_BLOCKS_HALF(n2), CAFFE_CUDA_NUM_THREADS_HALF, 0, stream>>>
(n2, reinterpret_cast<const half2*>(in), reinterpret_cast<float2*>(out));
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template void caffe_gpu_convert<double, float16>(const unsigned int n,
const double* in, float16* out);
template void caffe_gpu_convert<float16, double>(const unsigned int n,
const float16* in, double* out);
template void caffe_gpu_convert<double, float>(const unsigned int n,
const double* in, float* out);
template void caffe_gpu_convert<float, double>(const unsigned int n,
const float* in, double* out);
template<>
void caffe_gpu_convert<float, float>(const unsigned int n,
const float* in, float* out) {
caffe_copy(n, in, out);
}
template<>
void caffe_gpu_convert<double, double>(const unsigned int n,
const double* in, double* out) {
caffe_copy(n, in, out);
}
#ifndef CPU_ONLY
template<>
void caffe_gpu_convert<float16, float16>(const unsigned int n,
const float16* in, float16* out) {
caffe_copy(n, in, out);
}
#endif
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
template<>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template<>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template<>
void caffe_gpu_rng_uniform<float16>(const int n, const float16 a,
const float16 b, float16* r) {
GPUMemory::Workspace rf(n * sizeof(float));
float* rfp = static_cast<float*>(rf.data());
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), rfp, n));
const float range = b - a;
if (range != 1.F) {
caffe_gpu_scal(n, range, rfp);
}
if (a != static_cast<float16>(0)) {
caffe_gpu_add_scalar(n, static_cast<float>(a), rfp);
}
caffe_gpu_convert(n, rfp, r);
}
template<>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) {
CURAND_CHECK(curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template<>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) {
CURAND_CHECK(curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
template<>
void caffe_gpu_rng_gaussian(const int n, const float16 mu, const float16 sigma, float16* r) {
GPUMemory::Workspace rf(n * sizeof(float));
float* rfp = static_cast<float*>(rf.data());
CURAND_CHECK(curandGenerateNormal(Caffe::curand_generator(), rfp, n, mu, sigma));
caffe_gpu_convert(n, rfp, r);
}
template<typename Dtype>
__global__ void caffe_gpu_eltwise_max_kernel(const int N, const Dtype alpha, const Dtype* x,
const Dtype beta, Dtype* y) {
CUDA_KERNEL_LOOP(index, N) {
y[index] = max(alpha * x[index], beta * y[index]);
}
}
template<>
void caffe_gpu_eltwise_max<float>(const int N, const float alpha, const float* x,
const float beta, float* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
caffe_gpu_eltwise_max_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>
(N, alpha, x, beta, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_eltwise_max<double>(const int N,
const double alpha, const double* x, const double beta, double* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
caffe_gpu_eltwise_max_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>
(N, alpha, x, beta, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
#ifndef CPU_ONLY
template<>
void caffe_gpu_eltwise_max<float16>(const int N,
const float16 alpha, const float16* x, const float16 beta, float16* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
caffe_gpu_eltwise_max_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>
(N, alpha, x, beta, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
#endif
template<typename Dtype>
__global__ void caffe_gpu_eltwise_min_kernel(const int N,
const Dtype alpha, const Dtype* x, const Dtype beta, Dtype* y) {
CUDA_KERNEL_LOOP(index, N) {
y[index] = min(alpha * x[index], beta * y[index]);
}
}
template<>
void caffe_gpu_eltwise_min<float>(const int N,
const float alpha, const float* x, const float beta, float* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
caffe_gpu_eltwise_min_kernel<float> <<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N, alpha, x, beta, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_eltwise_min<double>(const int N,
const double alpha, const double* x, const double beta, double* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
caffe_gpu_eltwise_min_kernel<double> <<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N, alpha, x, beta, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
#ifndef CPU_ONLY
template<>
void caffe_gpu_eltwise_min<float16>(const int N,
const float16 alpha, const float16* x, const float16 beta, float16* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
caffe_gpu_eltwise_min_kernel<float16> <<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N, alpha, x, beta, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
#endif
//Already defined above
//DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0)));
//DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(if_zero, y[index] = ((x[index] <= Dtype(0) && x[index] >= Dtype(-0) ) ? 1 : 0) );
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(if_nonzero, y[index] = ((x[index] > Dtype(0) || x[index] < Dtype(-0) ) ? 1 : 0) )
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(eltwise_multi, y[index] = y[index]*x[index] )
DEFINE_AND_INSTANTIATE_GPU_1NARY_FUNC(set, x[index] = value, value )
DEFINE_AND_INSTANTIATE_GPU_X2NARY_FUNC(zerout, y[index] = ((x[index] <= Dtype(threshold) && x[index] >= Dtype(-threshold)) ? Dtype(0) : x[index]), threshold )
template <typename Dtype>
struct CheckZeroFunctor {
CheckZeroFunctor(const Dtype threshold) : threshold(threshold) {
}
__host__ __device__ bool operator()(const Dtype& x) {
return (x<=(threshold) && x>=(-threshold));
}
const Dtype threshold;
};
//float16 has too small a range to hold a count, so always use an int return type.
template <typename Dtype>
int caffe_gpu_count_zero(const int N, const Dtype* x, Dtype threshold) {
CheckZeroFunctor<Dtype> check_zero(threshold);
thrust::device_ptr<const Dtype> pWrapper(x);
int count = thrust::count_if(pWrapper, pWrapper+N, check_zero);
return count;
}
template
int caffe_gpu_count_zero<float>(const int N, const float* x, float threshold);
template
int caffe_gpu_count_zero<double>(const int N, const double* x, double threshold);
template
int caffe_gpu_count_zero<float16>(const int N, const float16* x, float16 threshold);
struct transform_op_float16_to_float {
__host__ __device__ float operator()(const float16 v1) {
return float(v1);
}
};
template <typename Dtype>
Dtype caffe_gpu_min(const int N, const Dtype* x) {
thrust::device_ptr<const Dtype> pWrapper(x);
const thrust::device_ptr<const Dtype> min_val = thrust::min_element(pWrapper, pWrapper+N);
return *min_val;
}
template
float caffe_gpu_min(const int N, const float* x);
template
double caffe_gpu_min(const int N, const double* x);
//Avoid float16 errors, probably because thrust::greater() and thrust::less()
//are not available for float16: provide a different specialization here
template<>
float16 caffe_gpu_min(const int N, const float16* x){
thrust::device_ptr<const float16> pWrapper(x);
float min_val = thrust::transform_reduce(pWrapper, pWrapper+N, transform_op_float16_to_float(),
float(std::numeric_limits<float>::max()), thrust::less<float>());
return min_val;
}
template <typename Dtype>
Dtype caffe_gpu_max(const int N, const Dtype* x) {
thrust::device_ptr<const Dtype> pWrapper(x);
const thrust::device_ptr<const Dtype> max_val = thrust::max_element(pWrapper, pWrapper+N);
return *max_val;
}
template
float caffe_gpu_max(const int N, const float* x);
template
double caffe_gpu_max(const int N, const double* x);
//Avoid float16 errors, probably because thrust::greater() and thrust::less()
//are not available for float16: provide a different specialization here
template<>
float16 caffe_gpu_max(const int N, const float16* x) {
thrust::device_ptr<const float16> pWrapper(x);
float max_val = thrust::transform_reduce(pWrapper, pWrapper+N, transform_op_float16_to_float(),
float(std::numeric_limits<float>::lowest()), thrust::greater<float>());
return max_val;
}
} // namespace caffe
|
9e8f486c2f3577715294ab5ca18cbc15bb1688bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
__global__ void
magma_zlobpcg_shift_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaDoubleComplex * x )
{
int idx = threadIdx.x; // thread in row
int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index
if ( row<num_rows) {
magmaDoubleComplex tmp = x[idx];
__syncthreads();
if ( idx > shift-1 ) {
idx-=shift;
x[idx] = tmp;
__syncthreads();
}
}
}
/**
Purpose
-------
For a Block-LOBPCG, the set of residuals (entries consecutive in memory)
shrinks and the vectors are shifted in case shift residuals drop below
threshold. The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
shift magma_int_t
shift number
@param[in,out]
x magmaDoubleComplex_ptr
input/output vector x
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zaux
********************************************************************/
extern "C" magma_int_t
magma_zlobpcg_shift(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaDoubleComplex_ptr x,
magma_queue_t queue )
{
magma_int_t num_threads = num_vecs;
// every thread handles one row containing the
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
int Ms = num_threads * sizeof( magmaDoubleComplex );
if ( Ms > 1024*8 )
printf("error: too much shared memory requested.\n");
dim3 block( num_threads, 1, 1 );
int dimgrid1 = int( sqrt( double( num_rows )));
int dimgrid2 = magma_ceildiv( num_rows, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
hipLaunchKernelGGL(( magma_zlobpcg_shift_kernel), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
num_rows, num_vecs, shift, x );
return MAGMA_SUCCESS;
}
| 9e8f486c2f3577715294ab5ca18cbc15bb1688bc.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
__global__ void
magma_zlobpcg_shift_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaDoubleComplex * x )
{
int idx = threadIdx.x; // thread in row
int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index
if ( row<num_rows) {
magmaDoubleComplex tmp = x[idx];
__syncthreads();
if ( idx > shift-1 ) {
idx-=shift;
x[idx] = tmp;
__syncthreads();
}
}
}
/**
Purpose
-------
For a Block-LOBPCG, the set of residuals (entries consecutive in memory)
shrinks and the vectors are shifted in case shift residuals drop below
threshold. The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
shift magma_int_t
shift number
@param[in,out]
x magmaDoubleComplex_ptr
input/output vector x
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zaux
********************************************************************/
extern "C" magma_int_t
magma_zlobpcg_shift(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaDoubleComplex_ptr x,
magma_queue_t queue )
{
magma_int_t num_threads = num_vecs;
// every thread handles one row containing the
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
int Ms = num_threads * sizeof( magmaDoubleComplex );
if ( Ms > 1024*8 )
printf("error: too much shared memory requested.\n");
dim3 block( num_threads, 1, 1 );
int dimgrid1 = int( sqrt( double( num_rows )));
int dimgrid2 = magma_ceildiv( num_rows, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
magma_zlobpcg_shift_kernel<<< grid, block, Ms, queue->cuda_stream() >>>
( num_rows, num_vecs, shift, x );
return MAGMA_SUCCESS;
}
|
9e4ebe53815313f575117425d745682fff638d45.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
// gpu - cuda includes
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "cutil_common.h"
#include <gstm_common.h>
#include "gstm_kernel.h"
extern "C" int var_size_valid(byte name, int th)
{
int num_var = constname2numconstvar(name);
if(th < num_var) return 1;
else return 0;
}
// const: pointer to a constraint [ name var1 var2 ...]
// th: order in sequence of variable
// var_const_max: bytes to represent the amount of variable name.
// 1) get name, 2) check range, 3) return ptr to the variable at th.(No conversion)
// output vpos: pointer to variable byte string or NULL
extern "C" void get_varstr_inconst(byte* aconst, int th, size_t var_const_max, byte* vpos, int is_uniform_var_width)
{
byte constname = aconst[0];
int offset = (is_uniform_var_width)? var_const_max : 1;
if(var_size_valid(constname, th)) {
int i;
for(i=0;i<var_const_max;i++)
vpos[i] = aconst[th * var_const_max + offset + i]; // copy byte i of the th-th variable
}
else {
printf("Err: get_variable_inconst - th is bigg/equal to number of variable for given constraint. constname:%c, requested varidx:%d\n", constname, th);
vpos = 0;
}
}
// output: variable value (as integer).
extern "C" int get_variable_inconst(byte* aconst, int th, size_t var_const_max, int is_uniform_var_width)
{
printf("get_variable in _const:%d\n", aconst[0]);
print_a_constraint(aconst, var_const_max, is_uniform_var_width);
// var_str: pointer to byte array where th'th variable sits
byte* p_var_str = (byte*)malloc(sizeof(byte) * var_const_max);
get_varstr_inconst(aconst, th, var_const_max, p_var_str, is_uniform_var_width);
printf("-1, var_str:%s at %dth\n", p_var_str, th);
int* p_var_num = (int*)malloc (sizeof(int));
bytes2int(p_var_str, p_var_num, var_const_max);
printf("-2, *p_var_num = %d\n", *p_var_num);
return *p_var_num;
}
// output 0, if c is neither analysis name nor constraint name
// output 1, if either analysis name or constraint name
extern "C" int is_const_name(byte c)
{
int var_size = constname2numconstvar(c);
if(0 == var_size) return 0;
else return 1;
}
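// is_constraintstream_empty: returns 1 when no variable in ss_const has any constraints
// attached; only the per-variable counters (num_const) are inspected.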
extern "C" int is_constraintstream_empty(int num_var, size_t var_const_max, SbaStream* ss_const)
{
int empty = 1; // set empty = TRUE.
// check only num_const.
int* num_const = ss_const->num_const;
int j;
for(j=0;j<num_var;j++) {
if((num_const != 0) && (num_const[j] != 0)) empty = 0; // empty==FALSE -> not empty
}
return empty;
/*
byte** constnames = ss_const->constnames;
byte** constm = ss_const->constm;
int k, l, m;
//check content.
for(k=0;k<num_var;k++) {
int offset = 0;
int num_const_var = num_const[k];
byte* cname = constnames[k];
for(l=0;l<num_const_var;l++) {
// is either analysis or constraint name, then empty = 0 (means not empty).
if (is_const_name (constm[k][offset])) {
empty = 0;
return empty;
}
int const_length = (cname[l], var_const_max, 1); // uniform_width
offset += const_length;
}
}
// at this point, constraint is turned out to be empty.
return empty;
*/
}
// allocate space as same as input arguments
extern "C" int* init_from_num_const(int num_var, int value, GpuFlag gpu_flag)
{
int* out_num_const;
size_t num_const_size = num_var * sizeof(int);
if(gpu_flag) { // gpu
CudaSafeCall(hipMalloc((void**)&out_num_const, num_const_size));
CudaSafeCall(hipMemset((void*)out_num_const, value, num_const_size));
}
else {
out_num_const = (int*)malloc(num_const_size);
memset((void*) out_num_const, value, num_const_size);
}
return out_num_const;
}
// value is initial values in the new storage
extern "C" byte** init_from_constnames(int num_var, int* num_const, byte value, GpuFlag gpu_flag)
{
int k;
int mx_num_const;
size_t sz_num_var = sizeof(int) * num_var;
printf("-----1.2-----\n");
if(gpu_flag) { // num_var is a host value; num_const points to GPU memory
int* h_num_const = (int*)malloc(sz_num_var);
CudaSafeCall(hipMemcpy(h_num_const, num_const, sz_num_var, hipMemcpyDeviceToHost));
mx_num_const = max_num_constraints(num_var, h_num_const);
}
else{
mx_num_const = max_num_constraints(num_var, num_const);
}
size_t constnamesp_size = sizeof (byte*) * num_var;
byte** out_constnames;
if(gpu_flag) { // gpu
byte** dcnp = (byte**)malloc(constnamesp_size);
int* nck = (int*)malloc(sz_num_var);
hipMemcpy((void*)nck, num_const, sz_num_var, hipMemcpyDeviceToHost);
for(k=0;k<num_var;k++) {
size_t constnames_size = sizeof(byte) * nck[k];
CudaSafeCall(hipMalloc((void**) dcnp + k, constnames_size));
CudaSafeCall(hipMemset((void*) *(dcnp + k), value, constnames_size));
}
CudaSafeCall(hipMalloc((void**)&out_constnames, constnamesp_size));
hipMemcpy(out_constnames, dcnp, constnamesp_size, hipMemcpyDeviceToDevice);
}
else {
out_constnames = (byte**) malloc (constnamesp_size);
for(k=0;k<num_var;k++) {
out_constnames[k] = (byte*) malloc (sizeof(byte) * mx_num_const);
memset((void*)out_constnames[k], value, (size_t)mx_num_const);
}
}
return out_constnames;
}
// value is the initial byte value written into the new storage
// num_const is gpu address
extern "C" byte** init_from_constm(int num_var, int* num_const, size_t var_const_max, byte value, GpuFlag gf, int is_uniform_var_width)
{
// preparing for answer storage by taking biggest constraint with maximum number of constraints
// for every variable. (sufficiently large area)
size_t sz_num_var = sizeof(int) * num_var;
int mx_const_byte = longest_constraint_byte(var_const_max, is_uniform_var_width);
int mx_num_const;
if(gf) { // num_var is a host value; num_const points to GPU memory
int* h_num_const = (int*)malloc(sz_num_var);
CudaSafeCall(hipMemcpy(h_num_const, num_const, sz_num_var, hipMemcpyDeviceToHost));
mx_num_const = max_num_constraints(num_var, h_num_const);
}
else{
mx_num_const = max_num_constraints(num_var, num_const);
}
size_t constmp_size = num_var*sizeof(byte*);
byte** out_constm;
int k;
if(gf) { // gpu
byte** dstmp = (byte**)malloc(constmp_size);
int* nck = (int*)malloc(sz_num_var);
hipMemcpy((void*)nck, num_const, sz_num_var, hipMemcpyDeviceToHost);
for(k=0;k<num_var;k++) {
size_t constm_size = sizeof(byte) * nck[k] * mx_const_byte;
CudaSafeCall(hipMalloc((void**) dstmp + k, constm_size));
CudaSafeCall(hipMemset((void*) *(dstmp + k), value, constm_size));
}
CudaSafeCall(hipMalloc((void***)&out_constm, constmp_size));
hipMemcpy(out_constm, dstmp, constmp_size, hipMemcpyDeviceToDevice);
}
else {
out_constm = (byte**) malloc (sizeof(byte*) * num_var);
for(k=0;k<num_var;k++) {
int size_out = mx_const_byte * mx_num_const;
out_constm[k] = (byte*) malloc (sizeof (byte) * size_out);
memset((void*)out_constm[k], value, (size_t)size_out);
}
}
return out_constm;
}
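// SbaStream_init_empty: allocates a host-side SbaStream for num_var variables with all
// counters zeroed and all per-variable streams set to NULL.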
extern "C" SbaStream* SbaStream_init_empty (int num_var)
{
SbaStream *ss = (SbaStream*)malloc(sizeof(SbaStream));
ss->num_const = (int*)malloc(sizeof(int) * num_var);
ss->constnames = (byte**)malloc(sizeof(byte*) * num_var);
ss->constm = (byte**)malloc(sizeof(byte*) * num_var);
int i;
for(i=0;i<num_var;i++) {
ss->num_const[i] = 0;
ss->constnames[i] = 0;
ss->constm[i] = 0;
}
//printf ("ss:%p, num_const:%p, constnames:%p, constm:%p\n", ss, ss->num_const, ss->constnames, ss->constm);
return ss;
}
// currently, when gpu_flag==1, this procedure does not perform a valid operation.
extern "C" void init_from_SbaStream(int num_var, size_t var_const_max, SbaStream* ss_in, SbaStream** ss_out, GpuFlag gf, int is_uniform_var_width)
{
int value_int = 0;
byte value_byte = 0;
if(gf) // gpu initialization
{
int* num_const = init_from_num_const(num_var, value_int, gf);
// constnames
SbaStream* ssh_in = (SbaStream*)malloc(sizeof(SbaStream));
CudaSafeCall(hipMemcpy ((void*)ssh_in, ss_in, sizeof(SbaStream), hipMemcpyDeviceToHost));
printf("---2---\n");
byte** constnames = init_from_constnames(num_var, ssh_in->num_const, value_byte, gf);
printf("---3---\n");
byte** constm = init_from_constm(num_var, ssh_in->num_const, var_const_max, value_byte, gf, is_uniform_var_width);
printf("---4---\n");
SbaStream* ss_store;
CudaSafeCall(hipMalloc((void**) & ss_store, sizeof(SbaStream)));
printf("---5---\n");
CudaSafeCall(hipMemcpy((void*) ss_store->num_const, num_const, sizeof(int*), hipMemcpyDeviceToDevice));
printf("---6---\n");
CudaSafeCall(hipMemcpy((void*) ss_store->constnames, constnames, sizeof(byte**), hipMemcpyDeviceToDevice));
printf("---7---\n");
CudaSafeCall(hipMemcpy((void*) ss_store->constm, constm, sizeof(byte**), hipMemcpyDeviceToDevice));
printf("---8---\n");
CudaSafeCall(hipMemcpy((void*) *ss_out, ss_store, sizeof(SbaStream*), hipMemcpyDeviceToDevice));
}
else // gpu_flag==0 host
{
int* num_const = init_from_num_const(num_var, value_int, gf);
byte** constnames = init_from_constnames(num_var, ss_in->num_const, value_byte, gf);
byte** constm = init_from_constm(num_var, ss_in->num_const, var_const_max, value_byte, gf, is_uniform_var_width);
SbaStream* ss_store = (SbaStream*)malloc(sizeof(SbaStream));
ss_store->num_const = num_const;
ss_store->constnames = constnames;
ss_store->constm = constm;
*ss_out = ss_store;
}
return;
}
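// copy_from_num_const: host-side deep copy of the per-variable constraint counters.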
extern "C" int* copy_from_num_const(int num_var, int* num_const)
{
int i;
int* out_num_const = (int*)malloc(sizeof(int) * num_var);
for(i=0;i<num_var;i++)
out_num_const[i] = num_const[i];
return out_num_const;
}
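// copy_from_constnames: host-side deep copy of the per-variable constraint-name arrays.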
extern "C" byte** copy_from_constnames(int num_var, int* num_const, byte** constnames)
{
int i,j;
byte** out_constnames = (byte**) malloc (sizeof(byte*) * num_var);
for(i=0;i<num_var;i++) {
out_constnames[i] = (byte*) malloc (sizeof(byte) * num_const[i]);
for(j=0;j<num_const[i];j++)
out_constnames[i][j] = constnames[i][j];
}
printf("copy_from_constnames:%p memcpyn", out_constnames);
return out_constnames;
}
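// copy_from_constm: host-side deep copy of the per-variable constraint byte streams;
// the size of each stream is recomputed from its constraint names.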
extern "C" byte** copy_from_constm(int num_var, int* num_const, byte** constname, size_t var_const_max, byte** constm)
{
int i,j;
byte** out_bytes = (byte**) malloc (sizeof(byte*) * num_var);
size_t* vconstm_size = (size_t*) calloc (num_var, sizeof (size_t)); // zero-initialized: sizes are accumulated with += below
for(i=0;i<num_var;i++) {
for(j=0;j<num_const[i];j++)
vconstm_size[i] += constraint_length(constname[i][j], var_const_max, 1);
}
for(i=0;i<num_var;i++) {
int constsize_byte = vconstm_size[i];
byte* vconst_src = constm[i];
byte* vconst_dst = (byte*) malloc (sizeof(byte) * constsize_byte);
memcpy(vconst_dst, vconst_src, constsize_byte);
out_bytes[i] = vconst_dst;
}
return out_bytes;
}
// host code.
extern "C" SbaStream* copy_from_SbaStream(int num_var, size_t var_const_max, SbaStream* ss_in)
{
//printf("-------reflection---------\n");
int* ref_num_const = copy_from_num_const (num_var, ss_in->num_const);
byte** ref_constnames = copy_from_constnames(num_var, ss_in->num_const,
ss_in->constnames);
byte** ref_constm = copy_from_constm(num_var, ss_in->num_const,
ss_in->constnames, var_const_max, ss_in->constm);
SbaStream *ss_out = (SbaStream*)malloc(sizeof(SbaStream));
ss_out->num_const = ref_num_const;
ss_out->constnames = ref_constnames;
ss_out->constm = ref_constm;
return ss_out;
}
// ss_f (from) --> ss_t (to)
// handles both hipMemcpyHostToDevice and hipMemcpyDeviceToHost directions
extern "C" void sbastream_alloc_copy(int num_var, size_t var_const_max, SbaStream* ss_f, SbaStream** ss_t, hipMemcpyKind direction)
{
// HOST -> Device
if(direction == hipMemcpyHostToDevice) {
// Number of constraints ///////////////////////////////////////////
int* num_const_f = ss_f->num_const; //array of number_of_constraints
size_t num_const_size = num_var * sizeof(int);
int* num_const_t = 0; // pointer to destination array of num_const
CudaSafeCall(hipMalloc((void**)&num_const_t, num_const_size));
hipMemcpy(num_const_t, num_const_f, num_const_size, direction);
printf("num_const:%p, %d\n", num_const_f, num_const_f[0]);
size_t pconsts_size = num_var * sizeof(byte*);
// Constraint Names ///////////////////////////////////////////
byte** constnames_f = ss_f->constnames;
byte** h_pcn = (byte**)malloc(num_var * sizeof (byte*));
int i;
for (i=0;i<num_var;i++) {
size_t cnlength = num_const_f[i] * sizeof(byte);
printf ("constname at %d(%d), %p \n", i, cnlength, constnames_f[i]);
if(cnlength > 0) printf ("first char:%c\n", constnames_f[i][0]);
hipMalloc((void**) h_pcn+i, cnlength);
hipMemcpy(*(h_pcn+i), constnames_f[i], cnlength, hipMemcpyHostToDevice);
}
byte** constnames_t;
hipMalloc((byte**) &constnames_t, pconsts_size);
CudaSafeCall(hipMemcpy(constnames_t, h_pcn, pconsts_size, hipMemcpyHostToDevice));
// constraint stream: constm ///////////////////////////////////////////
byte** constm_f = ss_f->constm;
byte** h_pctm = (byte**)malloc(num_var * sizeof (byte*));
for (i=0;i<num_var;i++) {
size_t constms_byte = sizeof(byte) * sum_const_sizes(constnames_f[i], num_const_f[i], var_const_max);
hipMalloc((void**) h_pctm + i, constms_byte);
hipMemcpy(*(h_pctm + i), constm_f[i], constms_byte, hipMemcpyHostToDevice);
if(constm_f[i] != 0) printf("constm(%d) first char:%c\n", i, constm_f[i][0]);
else printf("cosntm(%d) is null\n", i);
}
byte** constm_t = 0; // device
hipMalloc((void**) &constm_t, pconsts_size);
hipMemcpy(constm_t, h_pctm, pconsts_size, hipMemcpyHostToDevice);
// SbaStream //////////////////////////////////////////////
SbaStream* ss_htmp = (SbaStream*)malloc(sizeof(SbaStream));
ss_htmp->num_const = num_const_t;
ss_htmp->constnames = constnames_t;
ss_htmp->constm = constm_t;
printf("Sbastm is made, %p, %p, %p\n", ss_htmp->num_const, ss_htmp->constnames, ss_htmp->constm);
hipMalloc(ss_t, sizeof(SbaStream));
hipMemcpy((void*) *ss_t, ss_htmp, sizeof(SbaStream), hipMemcpyHostToDevice);
}
// Device -> HOST
else {
SbaStream* tmp = (SbaStream*)malloc(sizeof(SbaStream));
hipMemcpy(tmp, ss_f, sizeof(SbaStream), hipMemcpyDeviceToHost);
printf("upto here, %p, %p, %p\n", tmp->num_const, tmp->constnames, tmp->constm);
// num_const
int num_const_size = sizeof(int) * num_var;
int* num_const_h = (int*)malloc(num_const_size);
hipMemcpy(num_const_h, tmp->num_const, num_const_size, hipMemcpyDeviceToHost);
int i;
for(i=0;i<num_var;i++) {
printf("numconst at %d = %d\n", i, num_const_h[i]);
}
// constnames
int cnp_size = num_var* sizeof(byte*);
byte** cn_ptr = (byte**)malloc(cnp_size);
hipMemcpy(cn_ptr, tmp->constnames, cnp_size, hipMemcpyDeviceToHost);
printf("ptr-cn fetch\n");
byte** cnp = (byte**) malloc (num_var * sizeof(byte*));
for(i=0;i<num_var;i++) {
size_t constnames_size = sizeof(byte) * num_const_h[i];
cnp[i] = (byte*) malloc (constnames_size);
hipMemcpy(cnp[i], cn_ptr[i], constnames_size, hipMemcpyDeviceToHost);
int j;
for(j=0;j<constnames_size;j++)
printf("%dth byte:%c\n", j, cnp[i][j]);
}
// constm
byte** cstm_ptr = (byte**)malloc(cnp_size);
hipMemcpy(cstm_ptr, tmp->constm, cnp_size, hipMemcpyDeviceToHost); // pointers
printf("ptr-stc fetch, %d\n", num_var);
byte** cstmp = (byte**) malloc (num_var * sizeof(byte*));
for(i=0;i<num_var;i++) {
size_t constms_byte = sizeof(byte) * sum_const_sizes(cnp[i], num_const_h[i], var_const_max);
cstmp[i] = (byte*) malloc (constms_byte);
hipMemcpy(cstmp[i], cstm_ptr[i], constms_byte, hipMemcpyDeviceToHost);
int j;
for(j=0;j<constms_byte;j++)
printf("%dth byte:%d\n", j, cstmp[i][j]);
}
printf("constm end\n");
*ss_t = (SbaStream*)malloc(sizeof(SbaStream));
(*ss_t)->num_const = num_const_h;
(*ss_t)->constnames = cnp;
(*ss_t)->constm = cstmp;
}
return;
}
//always Host to Device
extern "C" void csr_alloc_copy(int num_var, int num_tot_const, int max_num_const,
int** d_ptr, int** d_indices, byte** d_data,
int* s_ptr, int* s_indices, byte* s_data,
size_t sz_a_const,
hipMemcpyKind direction) {
// CSR structure
size_t sz_ptr = sizeof(int) * (max_num_const + 1);
size_t sz_indices = sizeof(int) * num_tot_const;
size_t sz_data = sz_a_const * num_tot_const;
printf(" csr_alloc_copy, sz_ptr:%d\n", sz_ptr);
printf(" csr_alloc_copy, sz_indices:%d\n", sz_indices);
printf(" csr_alloc_copy, sz_data:%d\n", sz_data);
if(direction == hipMemcpyHostToDevice) {
CudaSafeCall(hipMalloc(d_ptr, sz_ptr));
CudaSafeCall(hipMalloc(d_indices, sz_indices));
CudaSafeCall(hipMalloc(d_data, sz_data));
}
else if(direction == hipMemcpyDeviceToHost) {
*d_ptr = (int*) malloc(sz_ptr);
*d_indices = (int*) malloc(sz_indices);
*d_data = (byte*) malloc(sz_data);
}
else {
printf("csr_alloc_copy() - error - csr_alloc_copy only supports H2D, D2H\n");
exit(1);
}
CudaSafeCall(hipMemcpy(*d_ptr, s_ptr, sz_ptr, direction));
CudaSafeCall(hipMemcpy(*d_indices, s_indices, sz_indices, direction));
CudaSafeCall(hipMemcpy(*d_data, s_data, sz_data, direction));
return;
}
// input : uniform var-width flag, equal constraint-size flag
// returns 1 if constraint sizes are uniform;
// otherwise prints an error and exits
extern "C" int warning_for_non_csr(int is_uniform_var_width, int is_equal_const_size) {
// if all constraints are in equal length, then each variable in a constratint have equal length too.
if(is_equal_const_size == 1) //is_uniform_var_width = 1;
return 1;
else if(is_uniform_var_width) {
printf("sba_solver_csr: This case cannot happen:constraints are not same is length, and they have uniform width in variable\n");
exit(1);
}
else {
printf("sba_solver_csr: This routine only support - equal sized constraints -and- variable lengths in constraint are uniform\n");
exit(1);
}
return 0;
}
// given input from Racket(list of list), produces CSR
// copy of code in project: xform_global_shared
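// Layout produced: logical row i of the CSR matrix holds the i-th constraint of every
// variable that still has one; ptr[i+1] is the running count of stored constraints,
// indices[k] records the owning variable, and data holds the raw constraint bytes.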
extern "C" void transform_const_csr(int num_var, int num_tot_const, int max_num_const, int* num_const, byte** constm,
int* ptr, int* indices, byte* data, size_t sz_a_const, int is_uniform_var_width, int is_equal_const_size)
{
int new_uniform_var_width = warning_for_non_csr(is_uniform_var_width, is_equal_const_size);
if(new_uniform_var_width) is_uniform_var_width = 1;
int i,j,k,l;
k=0, l=0; // l: num of valid consts
// for(i=0;i<num_var;i++)
// for(j=0;j<num_const[i];j++) {
// k++;
// if(k>8389000) {
// printf("srcccccc%d th \t", k);
// print_a_constraint(&(constm[i][j]), 4, is_uniform_var_width);
// }
// }
//
// k=0;
for(i=0;i<max_num_const;i++) {
for(j=0;j<num_var;j++) {
if (i < num_const[j]) {
byte* p_const = constm[j]+ i*sz_a_const;
memcpy(data + k*sz_a_const, p_const, sz_a_const); // general version
// data[k] = constm[j][i]; // valid only when sz_a_const = 1
indices[k] = j;
l++;
k++;
// if(k > 8389000) {
// printf("src-const[%d]:\t", k);
// print_a_constraint(p_const, 4, is_uniform_var_width);
// }
}
}
ptr[i+1] = l;
}
// for(k=0;k<num_var;k++)
// printf("num const[%d:%d\n", k, num_const[k]);
// for(j=0;j<=max_num_const;j++)
// printf("transform_const's ptr[%d]:%d\n", j, ptr[j]);
return;
}
// num_elt : length of input array in cpu
// gnum_elt: pointer to length of input arary in gpu
// g_elts: pointer to the array in GPU
// output: pointer to array that contains the sum at the first location in GPU
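// Strategy: repeatedly launch sum_kernel_int32; each pass reduces 2*blockDim.x elements
// per block to one partial sum, then the input and output buffers are swapped until a
// single block (and therefore a single sum) remains.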
extern "C" int* sum_gpu_kernel_int32(int num_elt, int* gnum_elt, int* gelts)
{
size_t sz_ull = sizeof(int);
size_t sz_elts = sz_ull * num_elt;
int* gnum_block;
hipMalloc(&gnum_block, sz_ull);
hipMemset(gnum_block, 0, sz_ull);
dim3 block1 = dim3 ((int) fmin((double)512, (double)num_elt/2.0), 1); // 512 threads deals with 1024 data.
int num_blk;
num_blk = (num_elt > block1.x)? num_elt / (2 * block1.x): 1; // because each thread process 2 elements
// return storage gsum
size_t sz_gsum = sz_ull * num_blk;
int *gsum;
hipMalloc(&gsum, sz_gsum);
hipMemset(gsum, 0, sz_gsum);
/*
unsigned int timerg_exe = 0;
cutCreateTimer(&timerg_exe);
cutResetTimer(timerg_exe);
cutStartTimer(timerg_exe);
*/
// Timer Event Prepare
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
// Start record
hipEventRecord(kernel_start, 0);
///////////////////////////////////////////////////////////////////////////////////////////////////////////
do {
// printf("--------------do-------------\nnumelt:%d\n", num_elt);
// int* tmpelt = (int*)malloc(sz_elts);
// hipMemcpy(tmpelt, gelts, sz_elts, hipMemcpyDeviceToHost);
// for(i=0;i<num_elt;i++)
// printf("tmpelt[%d] = %llu\n", i, tmpelt[i]);
block1 = dim3 ((int) fmin((double)512, (double)num_elt/2.0), 1); // 512 threads deals with 1024 data.
num_blk = (num_elt > block1.x)? num_elt / (2 * block1.x): 1; // because each thread process 2 elements
int mxgrd [] = {65535, 65535, 1};
printf("grid max = %d, %d blocks\n", mxgrd[0], mxgrd[1]);
int gridx = (num_blk > mxgrd[0])? mxgrd[0] : num_blk;
int gridy = (num_blk > mxgrd[0])? num_blk / mxgrd[0] + 1: 1;
printf("num_blk:%d\n", num_blk);
printf("grid dimension x, y = %d, %d\n", gridx, gridy);
dim3 grid1 = dim3(gridx, gridy);
size_t shds1 = 2 * block1.x * sizeof(int); // need factor 2 because a thread add 2 elements.
printf("blockdim.x = %d threads, shd size = %d bytes\n", block1.x, shds1);
// clear used location
size_t sz_gsum = sz_ull * num_blk;
// new grid, block, shds
hipLaunchKernelGGL(( sum_kernel_int32), dim3(grid1), dim3(block1), shds1, 0, gnum_elt, gelts, gnum_block, gsum);
num_elt = num_blk;
sz_elts = sz_gsum;
// interchange:
int* tmp_num_elt = gnum_elt;
int* tmp_elts = gelts;
gnum_elt = gnum_block;
gelts = gsum;
gnum_block = tmp_num_elt;
gsum = tmp_elts;
// // copy output by printing next inputs
// int* cnum_elt = (int*)malloc(sz_ull);
// hipMemcpy(cnum_elt, gnum_elt, sz_ull, hipMemcpyDeviceToHost);
// printf("next - numelt:%d\n", *cnum_elt);
// int i;
// int* celts = (int*)malloc(sz_elts);
// hipMemcpy(celts, gelts, sz_elts, hipMemcpyDeviceToHost);
// for(i=0;i<(int)*cnum_elt;i++)
// printf("%d th next elt:%llu\n", i, celts[i]);
} while (num_blk != 1);
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
cutStopTimer(timerg_exe);
double tvg = cutGetTimerValue(timerg_exe);
printf("gpu time glb(kernel):\n %e \n(%f)(ms)\n", tvg, tvg);
cutDeleteTimer(timerg_exe);
*/
// Stop event
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
float kernel_elapsedTime;
hipEventElapsedTime(&kernel_elapsedTime, kernel_start, kernel_stop); // that's our time!
// Clean up:
hipEventDestroy(kernel_start);
hipEventDestroy(kernel_stop);
printf("gpu time glb(kernel):\n %e \n(%f)(ms)\n", kernel_elapsedTime, kernel_elapsedTime);
return gelts;
}
//__global__ void add1(int* x)
//{
// *x = *x + 100;
// return;
//}
// convert ss_in into CSR
// copy CSR
// return back ss_out_analysis
extern "C" void sba_solve_csr(SbaStream* ss_in, int num_var, size_t var_const_max, SbaStream* ss_out_analysis,
int is_uniform_var_width, int is_equal_const_size)
{
int i;
int new_uniform_var_width = warning_for_non_csr(is_uniform_var_width, is_equal_const_size);
if(new_uniform_var_width) is_uniform_var_width = 1;
// uniform width for constraints
size_t sz_a_const = var_const_max * 4; // name + 3 references to variables.
// SbaStream -> num_tot_const
int num_tot_const = 0;
for(i=0;i<num_var;i++) {
num_tot_const += ss_in->num_const[i];
}
printf("tot_const:%d before convert to ...\n", num_tot_const);
// SbaStream -> max_num_const maximum number of constraints
int max_num_const = ss_in->num_const[0];
for(i=1;i<num_var;i++) {
int numcst = ss_in->num_const[i];
if(numcst > max_num_const) max_num_const = numcst;
}
// 3 vars for CSR structure.
int* ptr = (int*)malloc(sizeof(int) * (1 + max_num_const));
int* indices = (int*)malloc(sizeof(int) * num_tot_const);
byte* data = (byte*)malloc(sizeof(byte) * num_tot_const * sz_a_const);
//print_constraint_stream(num_var, var_const_max, ss_in, is_uniform_var_width, is_equal_const_size, 1000);
transform_const_csr(num_var, num_tot_const, max_num_const, ss_in->num_const, ss_in->constm,
ptr, indices, data, sz_a_const, is_uniform_var_width, is_equal_const_size);
//print_constraints_csr(num_var, num_tot_const, var_const_max, ptr, indices, data, sz_a_const, is_uniform_var_width, is_equal_const_size, 1000);
//////////////////////////////////////////////////////////////
//begin gpu timer
/*
unsigned int timerg_all = 0;
cutCreateTimer(&timerg_all);
cutResetTimer(timerg_all);
unsigned int timerg_exe = 0;
cutCreateTimer(&timerg_exe);
cutResetTimer(timerg_exe);
cutStartTimer(timerg_all);
*/
// Timer Event Prepare
hipEvent_t all_start, all_stop;
hipEventCreate(&all_start);
hipEventCreate(&all_stop);
// Start record
hipEventRecord(all_start, 0);
// copy to gpu
int* g_ptr;
int* g_indices;
byte* g_data;
csr_alloc_copy(num_var, num_tot_const, max_num_const,
&g_ptr, &g_indices, &g_data,
ptr, indices, data,
sz_a_const, hipMemcpyHostToDevice);
// // testing to gpu copy by copying back to cpu
// int* h_ptr;
// int* h_indices;
// byte* h_data;
// csr_alloc_copy(num_var, num_tot_const, max_num_const,
// &h_ptr, &h_indices, &h_data,
// g_ptr, g_indices, g_data,
// sz_a_const, hipMemcpyDeviceToHost);
//
// printf("num_total constraints:%d\n", num_tot_const);
// print_constraints_csr(num_var, num_tot_const, var_const_max, h_ptr, h_indices, h_data, sz_a_const, is_uniform_var_width, is_equal_const_size, 1000);
//
// output matrices of init_constraints_kernel()
size_t sz_mat = sz_a_const * num_var * max_num_const;
byte* g_constm;
CudaSafeCall(hipMalloc(&g_constm, sz_mat));
CudaSafeCall(hipMemset(g_constm, 0, sz_mat));
byte* g_asis;
CudaSafeCall(hipMalloc(&g_asis, sz_mat));
CudaSafeCall(hipMemset(g_asis, 0, sz_mat));
// lock, varnum for matrices g_const, g_asis
size_t sz_locks = sizeof(int) * num_var;
size_t sz_varnums = sizeof(int) * num_var;
int* g_lock_const;
CudaSafeCall(hipMalloc(&g_lock_const, sz_locks));
CudaSafeCall(hipMemset(g_lock_const, 0, sz_locks));
int* g_varnum_const;
CudaSafeCall(hipMalloc(&g_varnum_const, sz_varnums));
CudaSafeCall(hipMemset(g_varnum_const, 0, sz_varnums));
int* g_lock_asis;
CudaSafeCall(hipMalloc(&g_lock_asis, sz_locks));
CudaSafeCall(hipMemset(g_lock_asis, 0, sz_locks));
int* g_varnum_asis;
CudaSafeCall(hipMalloc(&g_varnum_asis, sz_varnums));
CudaSafeCall(hipMemset(g_varnum_asis, 0, sz_varnums));
int is_const_empty = 1;
int *g_is_const_empty;
CudaSafeCall(hipMalloc(&g_is_const_empty, sizeof(int)));
CudaSafeCall(hipMemcpy(g_is_const_empty, &is_const_empty, sizeof(int), hipMemcpyHostToDevice));
// initialize const-mat, asis-mat from ptr, indices, data
// each matrix is size (max_num_const x num_var),
// iteration is as long as num_tot_const
// read indices, data write to the matrix.
// row in matrix is defined by 1) varnum[] - current empty spot in mat, 2) lock[] - gateway controlling accessibility.
unsigned int binary_num_tot_const = least_upper_binary(num_tot_const);
// printf("binary upper bound of total const: %d\n", binary_num_tot_const);
/*
size_t cpyamount_device =
sizeof(int) * (max_num_const + 1) //ptr
+ sizeof(int) * num_tot_const //indices
+ sz_a_const * num_tot_const //data
+ 2*sz_mat + 2*sz_locks + 2*sz_varnums;
*/
int max_threads = (int) fmin(binary_num_tot_const, (float)pow(2,8)); //256
dim3 block_init(max_threads, 1);
dim3 grid_init(binary_num_tot_const/max_threads,1);
// if(block.x * grid.x >= 512*65536) {
// printf("cuda hw cannot support so many constraints\n");
// exit(1);
// }
// else
// printf("global memory was occupied by copy: %d bytes\n", cpyamount_device);
/*
cutStartTimer(timerg_exe);
*/
hipLaunchKernelGGL(( init_constraints_kernel_csr), dim3(grid_init), dim3(block_init), 0, 0,
num_var, max_num_const, num_tot_const, sz_a_const, // num of variables, longest const, total const, single const size
g_indices, g_data, // input data in CSR form
g_lock_const, g_varnum_const, // lock, positioner for new constm
g_constm, // storage for new constm
g_lock_asis, g_varnum_asis, // lock, positioner for new asis
g_asis,
g_is_const_empty); // set to 0 (not empty) is kernel read constraint during initialization
// solve_constraint kernel
printf("INIT-> no constraint exist\n");
size_t sz_num_var = sizeof(int) * num_var;
int* h_varnum_const = (int*)malloc(sz_num_var);
CudaSafeCall(hipMemcpy(h_varnum_const, g_varnum_const, sz_num_var, hipMemcpyDeviceToHost));
for(i=0;i<num_var;i++)
printf("org varnum_const[%d] = %d\n", i, h_varnum_const[i]);
int* h_varnum_asis = (int*)malloc(sz_num_var);
CudaSafeCall(hipMemcpy(h_varnum_asis, g_varnum_asis, sz_num_var, hipMemcpyDeviceToHost));
for(i=0;i<num_var;i++)
printf("org varnum_asis[%d] = %d\n", i, h_varnum_asis[i]);
// reflection definition
int* g_varnum_refl;
CudaSafeCall(hipMalloc(&g_varnum_refl, sz_varnums));
CudaSafeCall(hipMemcpy(g_varnum_refl, g_varnum_const, sz_varnums, hipMemcpyDeviceToDevice));
//printf("1\n");
byte* g_reflection;
CudaSafeCall(hipMalloc(&g_reflection, sz_mat));
CudaSafeCall(hipMemcpy(g_reflection, g_constm, sz_mat, hipMemcpyDeviceToDevice));
//printf("2\n");
CudaSafeCall(hipMemcpy(&is_const_empty, g_is_const_empty, sizeof(int), hipMemcpyDeviceToHost));
if(is_const_empty)
printf ("constraint is empty\n");
else printf ("constraint is not empty\n");
int* g_lock_new_const;
int* g_varnum_new_const;
byte* g_new_constm;
int binary_max_num_const = least_upper_binary(max_num_const);
int n_compare = binary_max_num_const * binary_max_num_const;
int max_threads_block = 512;
int bl = max_threads_block < n_compare;
if(bl) {
printf("constraint - comparisons (%d) are more than max number of threads(%d) -> exit",
n_compare, max_threads_block);
exit(1);
}
printf ("max num const: %d\n", binary_max_num_const);
int blk_c = max_num_const;
int blk_v = max_threads_block / n_compare;
dim3 block_solve(blk_v, blk_c, blk_c);
int gl = num_var / blk_v;
int grd_x = (gl)? gl : 1;
dim3 grid_solve(grd_x, 1, 1);
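// Worked example of the launch geometry (illustration only): if max_num_const == 8 (already a
// power of two) then binary_max_num_const == 8, n_compare == 64 and blk_v == 512/64 == 8, giving
// block_solve = (8, 8, 8) = 512 threads and grid_solve.x = num_var/8. Each block thus covers
// blk_v variables, presumably letting the y and z thread indices enumerate constraint pairs per
// variable (inferred from the block shape; the kernel body is in gstm_kernel.h).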
// this is for collecting sum of varnums in each block.
// we'll check if one of them are not zero -> iteration needed.
int* gsum_varnum_grid;
size_t sz_varnum_grid = sizeof(int) * grid_solve.x;
CudaSafeCall(hipMalloc(&gsum_varnum_grid, sz_varnum_grid));
CudaSafeCall(hipMemset(gsum_varnum_grid, 0, sz_varnum_grid));
// Shared memory size definition // for first kernel (solve_constraints)
size_t shd_solve = 2 * num_var * sizeof(int); // as big as to hold 2 arrays : lock_const, lock_asis
// Shared memory size definition // for second kernel (or_varnum_grid)
// for each block, we compute or_varnum_grid_kernel for the block
// gather to global memory, apply(or_varnum_grid_kernel) again until only one elt left in global.
//size_t shd_varnum = block_solve.x * sizeof(int);
int* g_sum_varnum;
CudaSafeCall(hipMalloc(&g_sum_varnum, sizeof(int)));
CudaSafeCall(hipMemset(g_sum_varnum, 0, sizeof(int)));
int cnt=0;
while(!is_const_empty) {
CudaSafeCall(hipMemcpy(g_is_const_empty, &is_const_empty, sizeof(int), hipMemcpyHostToDevice));
// new constraint - pair
CudaSafeCall(hipMalloc(&g_lock_new_const, sz_varnums));
CudaSafeCall(hipMemset(g_lock_new_const, 0, sz_varnums));
CudaSafeCall(hipMalloc(&g_varnum_new_const, sz_varnums));
CudaSafeCall(hipMemset(g_varnum_new_const, 0, sz_varnums));
CudaSafeCall(hipMalloc(&g_new_constm, sz_mat));
CudaSafeCall(hipMemset(g_new_constm, 0, sz_mat));
// testing sensor insertion
int sensor = 101;
int* g_sensor;
CudaSafeCall(hipMalloc(&g_sensor, sizeof(int)));
CudaSafeCall(hipMemcpy(g_sensor, &sensor, sizeof(int), hipMemcpyHostToDevice));
// // test for thread assignment
// dim3 g(1,1);
// dim3 b(1,1,1);
// printf("is constraint empty? = %d\n", is_const_empty);
// printf("grid x y = %d, %d\n", grid_solve.x, grid_solve.y);
// printf("block x y z = %d, %d, %d\n", block_solve.x, block_solve.y, block_solve.z);
// g_const_sample lets us peek at one constraint from inside the kernel for debugging.
int* g_const_sample;
CudaSafeCall(hipMalloc(&g_const_sample, sz_a_const));
// kernel
hipLaunchKernelGGL(( solve_constraints_kernel), dim3(grid_solve), dim3(block_solve), shd_solve, 0,
num_var, max_num_const, sz_a_const,
g_varnum_refl, g_reflection,
g_varnum_const, g_constm,
g_lock_asis, g_varnum_asis, g_asis,
g_lock_new_const, g_varnum_new_const, g_new_constm, g_sensor, g_const_sample);
sensor = 99;
CudaSafeCall(hipMemcpy(&sensor, g_sensor, sizeof(int), hipMemcpyDeviceToHost));
printf("sensor = %d\n", sensor);
byte* sample = (byte*)malloc(sz_a_const);
CudaSafeCall(hipMemcpy(sample, g_const_sample, sz_a_const, hipMemcpyDeviceToHost));
print_a_constraint(sample, var_const_max, is_uniform_var_width);
// check emptiness of constraints by summing all varnums and deriving is_const_empty
printf("in while empty? = %d\n", is_const_empty);
int* tmpvarnum = (int*)malloc(sz_varnums);
CudaSafeCall(hipMemcpy(tmpvarnum, g_varnum_new_const, sz_varnums, hipMemcpyDeviceToHost));
for(i=0;i<num_var;i++)
printf("%d - varnum const [%d] = %d\n", cnt, i, tmpvarnum[i]);
cnt++;
int* gnum_var;
CudaSafeCall(hipMalloc(&gnum_var, sizeof(int)));
CudaSafeCall(hipMemcpy(gnum_var, &num_var, sizeof(int), hipMemcpyHostToDevice));
g_sum_varnum = sum_gpu_kernel_int32 (num_var, gnum_var, g_varnum_new_const);
int* sum_varnum = (int*) malloc(sizeof(int));
CudaSafeCall(hipMemcpy(sum_varnum, g_sum_varnum, sizeof(int), hipMemcpyDeviceToHost));
printf("sum of all varnum_const = %d\n", *sum_varnum);
is_const_empty = 1;//(*sum_varnum == 0) ? 1 : 0; // sum=0 -> empty const -> is_const_empty = 1.
}
/*
cutStopTimer(timerg_exe);
cutStopTimer(timerg_all);
*/
printf("end of kernel invoke\n");
// test g_varnum_const, g_varnum_asis
int* varnum_const = (int*)malloc(sz_varnums);
memset(varnum_const, 0, sz_varnums);
CudaSafeCall(hipMemcpy(varnum_const, g_varnum_const, sz_varnums, hipMemcpyDeviceToHost));
byte* h_constm = (byte*) malloc(sz_mat);
CudaSafeCall(hipMemcpy(h_constm, g_constm, sz_mat, hipMemcpyDeviceToHost));
int* varnum_asis = (int*)malloc(sz_varnums);
memset(varnum_asis, 0, sz_varnums);
CudaSafeCall(hipMemcpy(varnum_asis, g_varnum_asis, sz_varnums, hipMemcpyDeviceToHost));
byte* h_asis = (byte*) malloc(sz_mat);
CudaSafeCall(hipMemcpy(h_asis, g_asis, sz_mat, hipMemcpyDeviceToHost));
printf("numvar:%d\n", num_var);
for(i=0;i<num_var;i+=1) {
printf("init -- var[%d], numconst:%d, num_ais:%d\n", i, varnum_const[i], varnum_asis[i]);
}
int* varnum_new_const = (int*)malloc(sz_varnums);
memset(varnum_new_const, 0, sz_varnums);
CudaSafeCall(hipMemcpy(varnum_new_const, g_varnum_new_const, sz_varnums, hipMemcpyDeviceToHost));
for(i=0;i<num_var;i+=1) {
printf("after exec -- var[%d], numconst:%d, num_ais:%d\n", i, varnum_new_const[i], varnum_asis[i]);
}
/*
double tvg = cutGetTimerValue(timerg_exe);
printf("gpu time glb(kernel):\n %e \n(%f)(ms)\n", tvg, tvg);
cutDeleteTimer(timerg_exe);
double tvga = cutGetTimerValue(timerg_all);
printf("gpu time glb(kernel+in-copy):\n %e \n(%f)(ms)\n", tvga, tvga);
cutDeleteTimer(timerg_all);
*/
// Stop event
hipEventRecord(all_stop, 0);
hipEventSynchronize(all_stop);
float all_elapsedTime;
hipEventElapsedTime(&all_elapsedTime, all_start, all_stop); // that's our time!
// Clean up:
hipEventDestroy(all_start);
hipEventDestroy(all_stop);
printf("gpu time glb(all):\n %e \n(%f)(ms)\n", all_elapsedTime, all_elapsedTime);
// printf("printing - constraints ...\n");
// print_constraints_gpu(num_var, sz_a_const, varnum_const, h_constm, is_uniform_var_width);
// printf("printing - asis ...\n");
// print_constraints_gpu(num_var, sz_a_const, varnum_asis, h_asis, is_uniform_var_width);
return;
}
extern "C" void sba_solve_stm(SbaStream* ss_in, int num_var, size_t var_const_max, SbaStream* ss_out_analysis,
int is_uniform_var_width, int is_equal_const_size)
{
// memalloc/memcpy for ss_in, ss_outconstraints, ss_out_analysis
// ss_in : host
SbaStream* ssg_in; //device
sbastream_alloc_copy(num_var, var_const_max, ss_in, &ssg_in, hipMemcpyHostToDevice);
printf("ssg_in\n");
// printf("test-----------------------------------------------------------------------------\n");
// SbaStream* ssh_in;
// sbastream_alloc_copy(num_var, var_const_max, ssg_in, &ssh_in, hipMemcpyDeviceToHost);
// print_constraint_stream(num_var, var_const_max, ssh_in, is_uniform_var_width, is_equal_const_size);
//
// printf("alloc constraint/analysis--------------------------------------------------------\n");
// last arg 1 means copying at gpu
SbaStream* ss_dfields = (SbaStream*)malloc(sizeof(SbaStream));
CudaSafeCall(hipMemcpy(ss_dfields, ssg_in, sizeof(SbaStream), hipMemcpyDeviceToHost));
int* ssg_cst_num_var;
byte** ssg_cst_constnames;
byte** ssg_cst_constm;
ssg_cst_num_var = init_from_num_const(num_var, 0, GPU);
ssg_cst_constnames = init_from_constnames(num_var, ss_dfields->num_const, '\0', GPU);
ssg_cst_constm = init_from_constm(num_var, ss_dfields->num_const, var_const_max, '\0', GPU, is_uniform_var_width);
int* ssg_anlys_num_var;
byte** ssg_anlys_constnames;
byte** ssg_anlys_constm;
ssg_anlys_num_var = init_from_num_const(num_var, 0, GPU);
ssg_anlys_constnames = init_from_constnames(num_var, ss_dfields->num_const, '\0', GPU);
ssg_anlys_constm = init_from_constm(num_var, ss_dfields->num_const, var_const_max, '\0', GPU, is_uniform_var_width);
/* Sbastream from given SbaStream at GPU is not dealt as it is, instead each field are created independently.
// ss_out_constraints : host
SbaStream* ssg_out_constraints; // ssg_out_constraints
sbastream_alloc_copy(num_var, var_const_max, ss_out_constraints, &ssg_out_constraints, hipMemcpyHostToDevice);
printf("ssg_out\n");
// ss_out_analysis : host
SbaStream* ssg_out_analysis; // device
sbastream_alloc_copy(num_var, var_const_max, ss_out_analysis, &ssg_out_analysis, hipMemcpyHostToDevice);
printf("ssg_out_analysis\n");
*/
int max_const_size = max_num_constraints(num_var, ss_in->num_const);
dim3 threads(num_var, max_const_size);
dim3 grid(1);
hipLaunchKernelGGL(( init_constraints_kernel_stm), dim3(grid), dim3(threads) , 0, 0,
ss_dfields->num_const, ss_dfields->constnames, ss_dfields->constm,
num_var, var_const_max,
ssg_cst_num_var, ssg_cst_constnames, ssg_cst_constm,
ssg_anlys_num_var, ssg_anlys_constnames, ssg_anlys_constm,
is_uniform_var_width, is_equal_const_size);
printf("-------original const---------\n");
/* print_constraint_stream(num_var, var_const_max, ss_in, is_uniform_var_width, is_equal_const_size);
SbaStream *ss_reflection = copy_from_SbaStream(num_var, var_const_max, ss_out_constraints);
printf("-------reflection---------\n");
print_constraint_stream(num_var, var_const_max, ss_reflection, is_uniform_var_width, is_equal_const_size);
printf("-------pure const---------\n");
print_constraint_stream(num_var, var_const_max, ss_out_constraints, is_uniform_var_width, is_equal_const_size);
printf("-------analysis-init--------\n");
print_constraint_stream(num_var, var_const_max, ss_out_analysis, is_uniform_var_width, is_equal_const_size);
// check if constraint stream is empty.
int empty_const = is_constraintstream_empty(num_var, var_const_max, ss_out_constraints);
printf("constraint is empty? %d\n", empty_const);
int while_iter_count = 0;
while(!empty_const)
{
int i;
byte* access_lock = (byte*) malloc (sizeof (byte) * num_var);
for(i=0;i<num_var;i++) {
access_lock[i] = '\0'; // lock deactivated -> okey to update.
// lock activated if '\1'
//printf("lock[%d] = %c\n", i, access_lock[i]);
}
SbaStream *ss_out_new_constraints = SbaStream_init_empty(num_var);
//init_constraints_kernel<<grid, threads>>();
printf("%dth Iteration\n------------------------------------------------\nGiven ss_out_costraints:\n", while_iter_count);
print_constraint_stream(num_var, var_const_max, ss_out_constraints, is_uniform_var_width, is_equal_const_size);
print_constraint_stream(num_var, var_const_max, ss_out_analysis, is_uniform_var_width, is_equal_const_size);
//iterate for all var / constraints(var).-> fill up ss_out_new_constraints.
for(grid=0;grid < num_var;grid++) {
printf("-------------------------------------------\nVarNO:%d, NUMber of constraint:%d, w/ thread:%d\n",
grid, ss_out_constraints->num_const[grid], threads);
for(threads=0;threads < ss_out_constraints->num_const[grid];threads++) {
solve_constraints_kernel(ss_reflection, ss_out_constraints, num_var, access_lock,
ss_out_new_constraints, ss_out_analysis, grid, threads,
var_const_max, &empty_const);
}
}
//printf("------------------------------------------------\nNewly updated ss_out_costraints:");
//print_constraint_stream(num_var, var_const_max, ss_out_constraints, is_uniform_var_width, is_equal_const_size);
//printf("------------------------------------------------\nNewly updated ss_out_analysis:");
//print_constraint_stream(num_var, var_const_max, ss_out_analysis, is_uniform_var_width, is_equal_const_size);
ss_out_constraints = ss_out_new_constraints;
while_iter_count++;
empty_const = is_constraintstream_empty(num_var, var_const_max, ss_out_constraints);
printf("constraint is empty? or not? %d -> check below:\nFinal constraints:", empty_const);
//print_constraint_stream(num_var, var_const_max, ss_out_constraints);
}
printf("While loop ended:%d\n", empty_const);
printf("\nreflection printing\n");
print_constraint_stream(num_var, var_const_max, ss_reflection, is_uniform_var_width, is_equal_const_size);
//printf("\nout_constraint printing\n");
//print_constraint_stream(num_var, var_const_max, ss_out_constraints, is_uniform_var_width, is_equal_const_size);
printf("\nout_analysis printing\n");
print_constraint_stream(num_var, var_const_max, ss_out_analysis, is_uniform_var_width, is_equal_const_size);
printf ("Total iteration of kernel:%d\n", while_iter_count);
*/
}
extern "C" int ffi_tester (int x)
{
int i;
for(i=0;i<x;i++) {
printf("ffi on testing : %d\n", i);
}
return 0;
}
| 9e4ebe53815313f575117425d745682fff638d45.cu | #include <stdio.h>
// gpu - cuda includes
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "cutil_common.h"
#include <gstm_common.h>
#include "gstm_kernel.h"
extern "C" int var_size_valid(byte name, int th)
{
int num_var = constname2numconstvar(name);
if(th < num_var) return 1;
else return 0;
}
// const: pointer to a constraint [ name var1 var2 ...]
// th: order in sequence of variable
// var_const_max: bytes to represent the amount of variable name.
// 1) get name, 2) check range, 3) return ptr to the variable at th.(No conversion)
// output vpos: pointer to variable byte string or NULL
extern "C" void get_varstr_inconst(byte* aconst, int th, size_t var_const_max, byte* vpos, int is_uniform_var_width)
{
byte constname = aconst[0];
int offset = (is_uniform_var_width)? var_const_max : 1;
if(var_size_valid(constname, th)) {
int i;
for(i=0;i<var_const_max;i++)
vpos[i] = aconst[th * var_const_max + offset];
}
else {
printf("Err: get_variable_inconst - th is bigg/equal to number of variable for given constraint. constname:%c, requested varidx:%d\n", constname, th);
vpos = 0;
}
}
// output: variable value (as integer).
extern "C" int get_variable_inconst(byte* aconst, int th, size_t var_const_max, int is_uniform_var_width)
{
printf("get_variable in _const:%d\n", aconst[0]);
print_a_constraint(aconst, var_const_max, is_uniform_var_width);
// var_str: pointer to byte array where th'th variable sits
byte* p_var_str = (byte*)malloc(sizeof(byte) * var_const_max);
get_varstr_inconst(aconst, th, var_const_max, p_var_str, is_uniform_var_width);
printf("-1, var_str:%s at %dth\n", p_var_str, th);
int* p_var_num = (int*)malloc (sizeof(int));
bytes2int(p_var_str, p_var_num, var_const_max);
printf("-2, *p_var_num = %d\n", *p_var_num);
return *p_var_num;
}
// output 0, if c is neither analysis name nor constraint name
// output 1, if either analysis name or constraint name
extern "C" int is_const_name(byte c)
{
int var_size = constname2numconstvar(c);
if(0 == var_size) return 0;
else return 1;
}
extern "C" int is_constraintstream_empty(int num_var, size_t var_const_max, SbaStream* ss_const)
{
int empty = 1; // set empty = TRUE.
// check only num_const.
int* num_const = ss_const->num_const;
int j;
for(j=0;j<num_var;j++) {
if((num_const != 0) && (num_const[j] != 0)) empty = 0; // empty==FALSE -> not empty
}
return empty;
/*
byte** constnames = ss_const->constnames;
byte** constm = ss_const->constm;
int k, l, m;
//check content.
for(k=0;k<num_var;k++) {
int offset = 0;
int num_const_var = num_const[k];
byte* cname = constnames[k];
for(l=0;l<num_const_var;l++) {
// is either analysis or constraint name, then empty = 0 (means not empty).
if (is_const_name (constm[k][offset])) {
empty = 0;
return empty;
}
int const_length = (cname[l], var_const_max, 1); // uniform_width
offset += const_length;
}
}
// at this point, constraint is turned out to be empty.
return empty;
*/
}
// allocate space as same as input arguments
extern "C" int* init_from_num_const(int num_var, int value, GpuFlag gpu_flag)
{
int* out_num_const;
size_t num_const_size = num_var * sizeof(int);
if(gpu_flag) { // gpu
CudaSafeCall(cudaMalloc((void**)&out_num_const, num_const_size));
CudaSafeCall(cudaMemset((void*)out_num_const, value, num_const_size));
}
else {
out_num_const = (int*)malloc(num_const_size);
memset((void*) out_num_const, value, num_const_size);
}
return out_num_const;
}
// value is initial values in the new storage
extern "C" byte** init_from_constnames(int num_var, int* num_const, byte value, GpuFlag gpu_flag)
{
int k;
int mx_num_const;
size_t sz_num_var = sizeof(int) * num_var;
printf("-----1.2-----\n");
if(gpu_flag) { // num_var is on the host; num_const is an int* pointing to a GPU location.
int* h_num_const = (int*)malloc(sz_num_var);
CudaSafeCall(cudaMemcpy(h_num_const, num_const, sz_num_var, cudaMemcpyDeviceToHost));
mx_num_const = max_num_constraints(num_var, h_num_const);
}
else{
mx_num_const = max_num_constraints(num_var, num_const);
}
size_t constnamesp_size = sizeof (byte*) * num_var;
byte** out_constnames;
if(gpu_flag) { // gpu
byte** dcnp = (byte**)malloc(constnamesp_size);;
int* nck = (int*)malloc(sz_num_var);
cudaMemcpy((void*)nck, num_const, sz_num_var, cudaMemcpyDeviceToHost);
for(k=0;k<num_var;k++) {
size_t constnames_size = sizeof(byte) * nck[k];
CudaSafeCall(cudaMalloc((void**) dcnp + k, constnames_size));
CudaSafeCall(cudaMemset((void*) *(dcnp + k), value, constnames_size));
}
CudaSafeCall(cudaMalloc((void**)&out_constnames, constnamesp_size));
cudaMemcpy(out_constnames, dcnp, constnamesp_size, cudaMemcpyDeviceToDevice);
}
else {
out_constnames = (byte**) malloc (constnamesp_size);
for(k=0;k<num_var;k++) {
out_constnames[k] = (byte*) malloc (sizeof(byte) * mx_num_const);
memset((void*)out_constnames[k], value, (size_t)mx_num_const);
}
}
return out_constnames;
}
// value is initial values in the new storage
// num_const is gpu address
extern "C" byte** init_from_constm(int num_var, int* num_const, size_t var_const_max, byte value, GpuFlag gf, int is_uniform_var_width)
{
// preparing for answer storage by taking biggest constraint with maximum number of constraints
// for every variable. (sufficiently large area)
size_t sz_num_var = sizeof(int) * num_var;
int mx_const_byte = longest_constraint_byte(var_const_max, is_uniform_var_width);
int mx_num_const;
if(gf) { // num_var is on the host; num_const is an int* pointing to a GPU location.
int* h_num_const = (int*)malloc(sz_num_var);
CudaSafeCall(cudaMemcpy(h_num_const, num_const, sz_num_var, cudaMemcpyDeviceToHost));
mx_num_const = max_num_constraints(num_var, h_num_const);
}
else{
mx_num_const = max_num_constraints(num_var, num_const);
}
size_t constmp_size = num_var*sizeof(byte*);
byte** out_constm;
int k;
if(gf) { // gpu
byte** dstmp = (byte**)malloc(constmp_size);;
int* nck = (int*)malloc(sz_num_var);
cudaMemcpy((void*)nck, num_const, sz_num_var, cudaMemcpyDeviceToHost);
for(k=0;k<num_var;k++) {
size_t constm_size = sizeof(byte) * nck[k] * mx_const_byte;
CudaSafeCall(cudaMalloc((void**) dstmp + k, constm_size));
CudaSafeCall(cudaMemset((void*) *(dstmp + k), value, constm_size));
}
CudaSafeCall(cudaMalloc((void***)&out_constm, constmp_size));
cudaMemcpy(out_constm, dstmp, constmp_size, cudaMemcpyDeviceToDevice);
}
else {
out_constm = (byte**) malloc (sizeof(byte*) * num_var);
for(k=0;k<num_var;k++) {
int size_out = mx_const_byte * mx_num_const;
out_constm[k] = (byte*) malloc (sizeof (byte) * size_out);
memset((void*)out_constm[k], value, (size_t)size_out);
}
}
return out_constm;
}
extern "C" SbaStream* SbaStream_init_empty (int num_var)
{
SbaStream *ss = (SbaStream*)malloc(sizeof(SbaStream));
ss->num_const = (int*)malloc(sizeof(int) * num_var);
ss->constnames = (byte**)malloc(sizeof(byte*) * num_var);
ss->constm = (byte**)malloc(sizeof(byte*) * num_var);
int i;
for(i=0;i<num_var;i++) {
ss->num_const[i] = 0;
ss->constnames[i] = 0;
ss->constm[i] = 0;
}
//printf ("ss:%p, num_const:%p, constnames:%p, constm:%p\n", ss, ss->num_const, ss->constnames, ss->constm);
return ss;
}
// currently when gpu_flag==1,this proc doesn't do valid operation.
extern "C" void init_from_SbaStream(int num_var, size_t var_const_max, SbaStream* ss_in, SbaStream** ss_out, GpuFlag gf, int is_uniform_var_width)
{
int value_int = 0;
byte value_byte = 0;
if(gf) // gpu initialization
{
int* num_const = init_from_num_const(num_var, value_int, gf);
// constnames
SbaStream* ssh_in = (SbaStream*)malloc(sizeof(SbaStream));
CudaSafeCall(cudaMemcpy ((void*)ssh_in, ss_in, sizeof(SbaStream), cudaMemcpyDeviceToHost));
printf("---2---\n");
byte** constnames = init_from_constnames(num_var, ssh_in->num_const, value_byte, gf);
printf("---3---\n");
byte** constm = init_from_constm(num_var, ssh_in->num_const, var_const_max, value_byte, gf, is_uniform_var_width);
printf("---4---\n");
SbaStream* ss_store;
CudaSafeCall(cudaMalloc((void**) & ss_store, sizeof(SbaStream)));
printf("---5---\n");
CudaSafeCall(cudaMemcpy((void*) ss_store->num_const, num_const, sizeof(int*), cudaMemcpyDeviceToDevice));
printf("---6---\n");
CudaSafeCall(cudaMemcpy((void*) ss_store->constnames, constnames, sizeof(byte**), cudaMemcpyDeviceToDevice));
printf("---7---\n");
CudaSafeCall(cudaMemcpy((void*) ss_store->constm, constm, sizeof(byte**), cudaMemcpyDeviceToDevice));
printf("---8---\n");
CudaSafeCall(cudaMemcpy((void*) *ss_out, ss_store, sizeof(SbaStream*), cudaMemcpyDeviceToDevice));
}
else // gpu_flag==0 host
{
int* num_const = init_from_num_const(num_var, value_int, gf);
byte** constnames = init_from_constnames(num_var, ss_in->num_const, value_byte, gf);
byte** constm = init_from_constm(num_var, ss_in->num_const, var_const_max, value_byte, gf, is_uniform_var_width);
SbaStream* ss_store = (SbaStream*)malloc(sizeof(SbaStream));
ss_store->num_const = num_const;
ss_store->constnames = constnames;
ss_store->constm = constm;
*ss_out = ss_store;
}
return;
}
extern "C" int* copy_from_num_const(int num_var, int* num_const)
{
int i;
int* out_num_const = (int*)malloc(sizeof(int) * num_var);
for(i=0;i<num_var;i++)
out_num_const[i] = num_const[i];
return out_num_const;
}
extern "C" byte** copy_from_constnames(int num_var, int* num_const, byte** constnames)
{
int i,j;
byte** out_constnames = (byte**) malloc (sizeof(byte*) * num_var);
for(i=0;i<num_var;i++) {
out_constnames[i] = (byte*) malloc (sizeof(byte) * num_const[i]);
for(j=0;j<num_const[i];j++)
out_constnames[i][j] = constnames[i][j];
}
printf("copy_from_constnames:%p memcpyn", out_constnames);
return out_constnames;
}
extern "C" byte** copy_from_constm(int num_var, int* num_const, byte** constname, size_t var_const_max, byte** constm)
{
int i,j;
byte** out_bytes = (byte**) malloc (sizeof(byte*) * num_var);
size_t* vconstm_size = (size_t*) calloc (num_var, sizeof (size_t)); // zero-initialize before accumulating sizes below
for(i=0;i<num_var;i++) {
for(j=0;j<num_const[i];j++)
vconstm_size[i] += constraint_length(constname[i][j], var_const_max, 1);
}
for(i=0;i<num_var;i++) {
int constsize_byte = vconstm_size[i];
byte* vconst_src = constm[i];
byte* vconst_dst = (byte*) malloc (sizeof(byte) * constsize_byte);
memcpy(vconst_dst, vconst_src, constsize_byte);
out_bytes[i] = vconst_dst;
}
return out_bytes;
}
// host code.
extern "C" SbaStream* copy_from_SbaStream(int num_var, size_t var_const_max, SbaStream* ss_in)
{
//printf("-------reflection---------\n");
int* ref_num_const = copy_from_num_const (num_var, ss_in->num_const);
byte** ref_constnames = copy_from_constnames(num_var, ss_in->num_const,
ss_in->constnames);
byte** ref_constm = copy_from_constm(num_var, ss_in->num_const,
ss_in->constnames, var_const_max, ss_in->constm);
SbaStream *ss_out = (SbaStream*)malloc(sizeof(SbaStream));
ss_out->num_const = ref_num_const;
ss_out->constnames = ref_constnames;
ss_out->constm = ref_constm;
return ss_out;
}
// ss_f (from) --> ss_t (to)
// currently implemented from HOST to DEVICE
extern "C" void sbastream_alloc_copy(int num_var, size_t var_const_max, SbaStream* ss_f, SbaStream** ss_t, cudaMemcpyKind direction)
{
// HOST -> Device
if(direction == cudaMemcpyHostToDevice) {
// Number of constraints ///////////////////////////////////////////
int* num_const_f = ss_f->num_const; //array of number_of_constraints
size_t num_const_size = num_var * sizeof(int);
int* num_const_t = 0; // pointer to destination array of num_const
CudaSafeCall(cudaMalloc((void**)&num_const_t, num_const_size));
cudaMemcpy(num_const_t, num_const_f, num_const_size, direction);
printf("num_const:%p, %d\n", num_const_f, num_const_f[0]);
size_t pconsts_size = num_var * sizeof(byte*);
// Constraint Names ///////////////////////////////////////////
byte** constnames_f = ss_f->constnames;
byte** h_pcn = (byte**)malloc(num_var * sizeof (byte*));
int i;
for (i=0;i<num_var;i++) {
size_t cnlength = num_const_f[i] * sizeof(byte);
printf ("constname at %d(%d), %p \n", i, cnlength, constnames_f[i]);
if(cnlength > 0) printf ("first char:%c\n", constnames_f[i][0]);
cudaMalloc((void**) h_pcn+i, cnlength);
cudaMemcpy(*(h_pcn+i), constnames_f[i], cnlength, cudaMemcpyHostToDevice);
}
byte** constnames_t;
cudaMalloc((byte**) &constnames_t, pconsts_size);
CudaSafeCall(cudaMemcpy(constnames_t, h_pcn, pconsts_size, cudaMemcpyHostToDevice));
// constraint stream: constm ///////////////////////////////////////////
byte** constm_f = ss_f->constm;
byte** h_pctm = (byte**)malloc(num_var * sizeof (byte*));
for (i=0;i<num_var;i++) {
size_t constms_byte = sizeof(byte) * sum_const_sizes(constnames_f[i], num_const_f[i], var_const_max);
cudaMalloc((void**) h_pctm + i, constms_byte);
cudaMemcpy(*(h_pctm + i), constm_f[i], constms_byte, cudaMemcpyHostToDevice);
if(constm_f[i] != 0) printf("constm(%d) first char:%c\n", i, constm_f[i][0]);
else printf("cosntm(%d) is null\n", i);
}
byte** constm_t = 0; // device
cudaMalloc((void**) &constm_t, pconsts_size);
cudaMemcpy(constm_t, h_pctm, pconsts_size, cudaMemcpyHostToDevice);
// SbaStream //////////////////////////////////////////////
SbaStream* ss_htmp = (SbaStream*)malloc(sizeof(SbaStream));
ss_htmp->num_const = num_const_t;
ss_htmp->constnames = constnames_t;
ss_htmp->constm = constm_t;
printf("Sbastm is made, %p, %p, %p\n", ss_htmp->num_const, ss_htmp->constnames, ss_htmp->constm);
cudaMalloc(ss_t, sizeof(SbaStream));
cudaMemcpy((void*) *ss_t, ss_htmp, sizeof(SbaStream), cudaMemcpyHostToDevice);
}
// Device -> HOST
else {
SbaStream* tmp = (SbaStream*)malloc(sizeof(SbaStream));
cudaMemcpy(tmp, ss_f, sizeof(SbaStream), cudaMemcpyDeviceToHost);
printf("upto here, %p, %p, %p\n", tmp->num_const, tmp->constnames, tmp->constm);
// num_const
int num_const_size = sizeof(int) * num_var;
int* num_const_h = (int*)malloc(num_const_size);
cudaMemcpy(num_const_h, tmp->num_const, num_const_size, cudaMemcpyDeviceToHost);
int i;
for(i=0;i<num_var;i++) {
printf("numconst at %d = %d\n", i, num_const_h[i]);
}
// constnames
int cnp_size = num_var* sizeof(byte*);
byte** cn_ptr = (byte**)malloc(cnp_size);
cudaMemcpy(cn_ptr, tmp->constnames, cnp_size, cudaMemcpyDeviceToHost);
printf("ptr-cn fetch\n");
byte** cnp = (byte**) malloc (num_var * sizeof(byte*));
for(i=0;i<num_var;i++) {
size_t constnames_size = sizeof(byte) * num_const_h[i];
cnp[i] = (byte*) malloc (constnames_size);
cudaMemcpy(cnp[i], cn_ptr[i], constnames_size, cudaMemcpyDeviceToHost);
int j;
for(j=0;j<constnames_size;j++)
printf("%dth byte:%c\n", j, cnp[i][j]);
}
// constm
byte** cstm_ptr = (byte**)malloc(cnp_size);
cudaMemcpy(cstm_ptr, tmp->constm, cnp_size, cudaMemcpyDeviceToHost); // pointers
printf("ptr-stc fetch, %d\n", num_var);
byte** cstmp = (byte**) malloc (num_var * sizeof(byte*));
for(i=0;i<num_var;i++) {
size_t constms_byte = sizeof(byte) * sum_const_sizes(cnp[i], num_const_h[i], var_const_max);
cstmp[i] = (byte*) malloc (constms_byte);
cudaMemcpy(cstmp[i], cstm_ptr[i], constms_byte, cudaMemcpyDeviceToHost);
int j;
for(j=0;j<constms_byte;j++)
printf("%dth byte:%d\n", j, cstmp[i][j]);
}
printf("constm end\n");
*ss_t = (SbaStream*)malloc(sizeof(SbaStream));
(*ss_t)->num_const = num_const_h;
(*ss_t)->constnames = cnp;
(*ss_t)->constm = cstmp;
}
return;
}
//always Host to Device
extern "C" void csr_alloc_copy(int num_var, int num_tot_const, int max_num_const,
int** d_ptr, int** d_indices, byte** d_data,
int* s_ptr, int* s_indices, byte* s_data,
size_t sz_a_const,
cudaMemcpyKind direction) {
// CSR structure
size_t sz_ptr = sizeof(int) * (max_num_const + 1);
size_t sz_indices = sizeof(int) * num_tot_const;
size_t sz_data = sz_a_const * num_tot_const;
printf(" csr_alloc_copy, sz_ptr:%d\n", sz_ptr);
printf(" csr_alloc_copy, sz_indices:%d\n", sz_indices);
printf(" csr_alloc_copy, sz_data:%d\n", sz_data);
if(direction == cudaMemcpyHostToDevice) {
CudaSafeCall(cudaMalloc(d_ptr, sz_ptr));
CudaSafeCall(cudaMalloc(d_indices, sz_indices));
CudaSafeCall(cudaMalloc(d_data, sz_data));
}
else if(direction == cudaMemcpyDeviceToHost) {
*d_ptr = (int*) malloc(sz_ptr);
*d_indices = (int*) malloc(sz_indices);
*d_data = (byte*) malloc(sz_data);
}
else {
printf("csr_alloc_copy() - error - csr_alloc_copy only supports H2D, D2H\n");
exit(1);
}
CudaSafeCall(cudaMemcpy(*d_ptr, s_ptr, sz_ptr, direction));
CudaSafeCall(cudaMemcpy(*d_indices, s_indices, sz_indices, direction));
CudaSafeCall(cudaMemcpy(*d_data, s_data, sz_data, direction));
return;
}
// input : uniform var-length, uniform constraint length
// 1 if constraint sizes are uniform
// 0 otherwise
extern "C" int warning_for_non_csr(int is_uniform_var_width, int is_equal_const_size) {
// if all constraints are in equal length, then each variable in a constratint have equal length too.
if(is_equal_const_size == 1) //is_uniform_var_width = 1;
return 1;
else if(is_uniform_var_width) {
printf("sba_solver_csr: This case cannot happen:constraints are not same is length, and they have uniform width in variable\n");
exit(1);
}
else {
printf("sba_solver_csr: This routine only support - equal sized constraints -and- variable lengths in constraint are uniform\n");
exit(1);
}
return 0;
}
// given input from Racket(list of list), produces CSR
// copy of code in project: xform_global_shared
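// Layout produced below (small illustration): with num_var = 3 and num_const = {2, 1, 0},
// constraints are emitted rank-by-rank across variables, so
// data = [c0_0, c1_0, c0_1] (each entry sz_a_const bytes),
// indices = [0, 1, 0] (owning variable of each entry),
// ptr[i+1] = number of entries whose rank is <= i, here ptr[1] = 2, ptr[2] = 3
// (ptr[0] is not written by this routine).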
extern "C" void transform_const_csr(int num_var, int num_tot_const, int max_num_const, int* num_const, byte** constm,
int* ptr, int* indices, byte* data, size_t sz_a_const, int is_uniform_var_width, int is_equal_const_size)
{
int new_uniform_var_width = warning_for_non_csr(is_uniform_var_width, is_equal_const_size);
if(new_uniform_var_width) is_uniform_var_width = 1;
int i,j,k,l;
k=0, l=0; // l: num of valid consts
// for(i=0;i<num_var;i++)
// for(j=0;j<num_const[i];j++) {
// k++;
// if(k>8389000) {
// printf("srcccccc%d th \t", k);
// print_a_constraint(&(constm[i][j]), 4, is_uniform_var_width);
// }
// }
//
// k=0;
for(i=0;i<max_num_const;i++) {
for(j=0;j<num_var;j++) {
if (i < num_const[j]) {
byte* p_const = constm[j]+ i*sz_a_const;
memcpy(data + k*sz_a_const, p_const, sz_a_const); // general version
// data[k] = constm[j][i]; // valid only when sz_a_const = 1
indices[k] = j;
l++;
k++;
// if(k > 8389000) {
// printf("src-const[%d]:\t", k);
// print_a_constraint(p_const, 4, is_uniform_var_width);
// }
}
}
ptr[i+1] = l;
}
// for(k=0;k<num_var;k++)
// printf("num const[%d:%d\n", k, num_const[k]);
// for(j=0;j<=max_num_const;j++)
// printf("transform_const's ptr[%d]:%d\n", j, ptr[j]);
return;
}
// num_elt : length of input array in cpu
// gnum_elt: pointer to length of input arary in gpu
// g_elts: pointer to the array in GPU
// output: pointer to array that contains the sum at the first location in GPU
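// Reduction scheme (illustration of the loop below): each pass launches blocks of up to 512
// threads where every thread folds two elements, so one pass shrinks num_elt by a factor of up
// to 1024; the per-block partial sums become the input of the next pass and the two buffers are
// swapped (ping-pong) until a single block suffices. E.g. num_elt = 4096 -> 4 partial sums -> 1 value.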
extern "C" int* sum_gpu_kernel_int32(int num_elt, int* gnum_elt, int* gelts)
{
size_t sz_ull = sizeof(int);
size_t sz_elts = sz_ull * num_elt;
int* gnum_block;
cudaMalloc(&gnum_block, sz_ull);
cudaMemset(gnum_block, 0, sz_ull);
dim3 block1 = dim3 ((int) fmin((double)512, (double)num_elt/2.0), 1); // 512 threads deal with 1024 elements.
int num_blk;
num_blk = (num_elt > block1.x)? num_elt / (2 * block1.x): 1; // because each thread processes 2 elements
// return storage gsum
size_t sz_gsum = sz_ull * num_blk;
int *gsum;
cudaMalloc(&gsum, sz_gsum);
cudaMemset(gsum, 0, sz_gsum);
/*
unsigned int timerg_exe = 0;
cutCreateTimer(&timerg_exe);
cutResetTimer(timerg_exe);
cutStartTimer(timerg_exe);
*/
// Timer Event Prepare
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_stop);
// Start record
cudaEventRecord(kernel_start, 0);
///////////////////////////////////////////////////////////////////////////////////////////////////////////
do {
// printf("--------------do-------------\nnumelt:%d\n", num_elt);
// int* tmpelt = (int*)malloc(sz_elts);
// cudaMemcpy(tmpelt, gelts, sz_elts, cudaMemcpyDeviceToHost);
// for(i=0;i<num_elt;i++)
// printf("tmpelt[%d] = %llu\n", i, tmpelt[i]);
block1 = dim3 ((int) fmin((double)512, (double)num_elt/2.0), 1); // 512 threads deal with 1024 elements.
num_blk = (num_elt > block1.x)? num_elt / (2 * block1.x): 1; // because each thread processes 2 elements
int mxgrd [] = {65535, 65535, 1};
printf("grid max = %d, %d blocks\n", mxgrd[0], mxgrd[1]);
int gridx = (num_blk > mxgrd[0])? mxgrd[0] : num_blk;
int gridy = (num_blk > mxgrd[0])? num_blk / mxgrd[0] + 1: 1;
printf("num_blk:%d\n", num_blk);
printf("grid dimension x, y = %d, %d\n", gridx, gridy);
dim3 grid1 = dim3(gridx, gridy);
size_t shds1 = 2 * block1.x * sizeof(int); // need factor 2 because a thread add 2 elements.
printf("blockdim.x = %d threads, shd size = %d bytes\n", block1.x, shds1);
// clear used location
size_t sz_gsum = sz_ull * num_blk;
// new grid, block, shds
sum_kernel_int32<<<grid1, block1, shds1>>>(gnum_elt, gelts, gnum_block, gsum);
num_elt = num_blk;
sz_elts = sz_gsum;
// interchange:
int* tmp_num_elt = gnum_elt;
int* tmp_elts = gelts;
gnum_elt = gnum_block;
gelts = gsum;
gnum_block = tmp_num_elt;
gsum = tmp_elts;
// // copy output by printing next inputs
// int* cnum_elt = (int*)malloc(sz_ull);
// cudaMemcpy(cnum_elt, gnum_elt, sz_ull, cudaMemcpyDeviceToHost);
// printf("next - numelt:%d\n", *cnum_elt);
// int i;
// int* celts = (int*)malloc(sz_elts);
// cudaMemcpy(celts, gelts, sz_elts, cudaMemcpyDeviceToHost);
// for(i=0;i<(int)*cnum_elt;i++)
// printf("%d th next elt:%llu\n", i, celts[i]);
} while (num_blk != 1);
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
cutStopTimer(timerg_exe);
double tvg = cutGetTimerValue(timerg_exe);
printf("gpu time glb(kernel):\n %e \n(%f)(ms)\n", tvg, tvg);
cutDeleteTimer(timerg_exe);
*/
// Stop event
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
float kernel_elapsedTime;
cudaEventElapsedTime(&kernel_elapsedTime, kernel_start, kernel_stop); // that's our time!
// Clean up:
cudaEventDestroy(kernel_start);
cudaEventDestroy(kernel_stop);
printf("gpu time glb(kernel):\n %e \n(%f)(ms)\n", kernel_elapsedTime, kernel_elapsedTime);
return gelts;
}
//__global__ void add1(int* x)
//{
// *x = *x + 100;
// return;
//}
// convert ss_in into CSR
// copy CSR
// return back ss_out_analysis
extern "C" void sba_solve_csr(SbaStream* ss_in, int num_var, size_t var_const_max, SbaStream* ss_out_analysis,
int is_uniform_var_width, int is_equal_const_size)
{
int i;
int new_uniform_var_width = warning_for_non_csr(is_uniform_var_width, is_equal_const_size);
if(new_uniform_var_width) is_uniform_var_width = 1;
// uniform width for constraints
size_t sz_a_const = var_const_max * 4; // name + 3 references to variables.
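// Record layout used throughout this routine: one constraint is 4 fields of var_const_max bytes
// each, [name | var1 | var2 | var3]; e.g. var_const_max = 4 gives 16-byte records, and the k-th
// record of a stream starts at byte offset k * sz_a_const.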
// SbaStream -> num_tot_const
int num_tot_const = 0;
for(i=0;i<num_var;i++) {
num_tot_const += ss_in->num_const[i];
}
printf("tot_const:%d before convert to ...\n", num_tot_const);
// SbaStream -> max_num_const maximum number of constraints
int max_num_const = ss_in->num_const[0];
for(i=1;i<num_var;i++) {
int numcst = ss_in->num_const[i];
if(numcst > max_num_const) max_num_const = numcst;
}
// 3 vars for CSR structure.
int* ptr = (int*)malloc(sizeof(int) * (1 + max_num_const));
int* indices = (int*)malloc(sizeof(int) * num_tot_const);
byte* data = (byte*)malloc(sizeof(byte) * num_tot_const * sz_a_const);
//print_constraint_stream(num_var, var_const_max, ss_in, is_uniform_var_width, is_equal_const_size, 1000);
transform_const_csr(num_var, num_tot_const, max_num_const, ss_in->num_const, ss_in->constm,
ptr, indices, data, sz_a_const, is_uniform_var_width, is_equal_const_size);
//print_constraints_csr(num_var, num_tot_const, var_const_max, ptr, indices, data, sz_a_const, is_uniform_var_width, is_equal_const_size, 1000);
//////////////////////////////////////////////////////////////
//begin gpu timer
/*
unsigned int timerg_all = 0;
cutCreateTimer(&timerg_all);
cutResetTimer(timerg_all);
unsigned int timerg_exe = 0;
cutCreateTimer(&timerg_exe);
cutResetTimer(timerg_exe);
cutStartTimer(timerg_all);
*/
// Timer Event Prepare
cudaEvent_t all_start, all_stop;
cudaEventCreate(&all_start);
cudaEventCreate(&all_stop);
// Start record
cudaEventRecord(all_start, 0);
// copy to gpu
int* g_ptr;
int* g_indices;
byte* g_data;
csr_alloc_copy(num_var, num_tot_const, max_num_const,
&g_ptr, &g_indices, &g_data,
ptr, indices, data,
sz_a_const, cudaMemcpyHostToDevice);
// // testing to gpu copy by copying back to cpu
// int* h_ptr;
// int* h_indices;
// byte* h_data;
// csr_alloc_copy(num_var, num_tot_const, max_num_const,
// &h_ptr, &h_indices, &h_data,
// g_ptr, g_indices, g_data,
// sz_a_const, cudaMemcpyDeviceToHost);
//
// printf("num_total constraints:%d\n", num_tot_const);
// print_constraints_csr(num_var, num_tot_const, var_const_max, h_ptr, h_indices, h_data, sz_a_const, is_uniform_var_width, is_equal_const_size, 1000);
//
// output matrices of init_constraints_kernel()
size_t sz_mat = sz_a_const * num_var * max_num_const;
byte* g_constm;
CudaSafeCall(cudaMalloc(&g_constm, sz_mat));
CudaSafeCall(cudaMemset(g_constm, 0, sz_mat));
byte* g_asis;
CudaSafeCall(cudaMalloc(&g_asis, sz_mat));
CudaSafeCall(cudaMemset(g_asis, 0, sz_mat));
// lock, varnum for matrices g_const, g_asis
size_t sz_locks = sizeof(int) * num_var;
size_t sz_varnums = sizeof(int) * num_var;
int* g_lock_const;
CudaSafeCall(cudaMalloc(&g_lock_const, sz_locks));
CudaSafeCall(cudaMemset(g_lock_const, 0, sz_locks));
int* g_varnum_const;
CudaSafeCall(cudaMalloc(&g_varnum_const, sz_varnums));
CudaSafeCall(cudaMemset(g_varnum_const, 0, sz_varnums));
int* g_lock_asis;
CudaSafeCall(cudaMalloc(&g_lock_asis, sz_locks));
CudaSafeCall(cudaMemset(g_lock_asis, 0, sz_locks));
int* g_varnum_asis;
CudaSafeCall(cudaMalloc(&g_varnum_asis, sz_varnums));
CudaSafeCall(cudaMemset(g_varnum_asis, 0, sz_varnums));
int is_const_empty = 1;
int *g_is_const_empty;
CudaSafeCall(cudaMalloc(&g_is_const_empty, sizeof(int)));
CudaSafeCall(cudaMemcpy(g_is_const_empty, &is_const_empty, sizeof(int), cudaMemcpyHostToDevice));
// initialize const-mat, asis-mat from ptr, indices, data
// each matrix is size (max_num_const x num_var),
// iteration is as long as num_tot_const
// read indices, data write to the matrix.
// row in matrix is defined by 1) varnum[] - current empty slot in mat, 2) lock[] - gateway showing accessibility.
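// A note on the scheme (the kernel body lives in gstm_kernel.h, not shown here):
// each of the num_tot_const CSR entries is routed to the per-variable matrix chosen by indices[k];
// varnum[v] tracks the next free slot for variable v and lock[v] serializes concurrent writers,
// which suggests an atomic slot-reservation pattern (an assumption based on the varnum/lock pairing).
// least_upper_binary() (defined elsewhere) appears to round num_tot_const up to the next power of
// two so the total thread count divides evenly into the fixed-size blocks chosen below (assumption
// based on its name and on how the grid is derived from it).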
unsigned int binary_num_tot_const = least_upper_binary(num_tot_const);
// printf("binary upper bound of total const: %d\n", binary_num_tot_const);
/*
size_t cpyamount_device =
sizeof(int) * (max_num_const + 1) //ptr
+ sizeof(int) * num_tot_const //indices
+ sz_a_const * num_tot_const //data
+ 2*sz_mat + 2*sz_locks + 2*sz_varnums;
*/
int max_threads = (int) fmin(binary_num_tot_const, (float)pow(2,8)); //256
dim3 block_init(max_threads, 1);
dim3 grid_init(binary_num_tot_const/max_threads,1);
// if(block.x * grid.x >= 512*65536) {
// printf("cuda hw cannot support so many constraints\n");
// exit(1);
// }
// else
// printf("global memory was occupied by copy: %d bytes\n", cpyamount_device);
/*
cutStartTimer(timerg_exe);
*/
init_constraints_kernel_csr<<<grid_init, block_init>>>(
num_var, max_num_const, num_tot_const, sz_a_const, // num of variables, longest const, total const, single const size
g_indices, g_data, // input data in CSR form
g_lock_const, g_varnum_const, // lock, positioner for new constm
g_constm, // storage for new constm
g_lock_asis, g_varnum_asis, // lock, positioner for new asis
g_asis,
g_is_const_empty); // set to 0 (not empty) is kernel read constraint during initialization
// solve_constraint kernel
printf("INIT-> no constraint exist\n");
size_t sz_num_var = sizeof(int) * num_var;
int* h_varnum_const = (int*)malloc(sz_num_var);
CudaSafeCall(cudaMemcpy(h_varnum_const, g_varnum_const, sz_num_var, cudaMemcpyDeviceToHost));
for(i=0;i<num_var;i++)
printf("org varnum_const[%d] = %d\n", i, h_varnum_const[i]);
int* h_varnum_asis = (int*)malloc(sz_num_var);
CudaSafeCall(cudaMemcpy(h_varnum_asis, g_varnum_asis, sz_num_var, cudaMemcpyDeviceToHost));
for(i=0;i<num_var;i++)
printf("org varnum_asis[%d] = %d\n", i, h_varnum_asis[i]);
// reflection definition
int* g_varnum_refl;
CudaSafeCall(cudaMalloc(&g_varnum_refl, sz_varnums));
CudaSafeCall(cudaMemcpy(g_varnum_refl, g_varnum_const, sz_varnums, cudaMemcpyDeviceToDevice));
//printf("1\n");
byte* g_reflection;
CudaSafeCall(cudaMalloc(&g_reflection, sz_mat));
CudaSafeCall(cudaMemcpy(g_reflection, g_constm, sz_mat, cudaMemcpyDeviceToDevice));
//printf("2\n");
CudaSafeCall(cudaMemcpy(&is_const_empty, g_is_const_empty, sizeof(int), cudaMemcpyDeviceToHost));
if(is_const_empty)
printf ("constraint is empty\n");
else printf ("constraint is not empty\n");
int* g_lock_new_const;
int* g_varnum_new_const;
byte* g_new_constm;
int binary_max_num_const = least_upper_binary(max_num_const);
int n_compare = binary_max_num_const * binary_max_num_const;
int max_threads_block = 512;
int bl = max_threads_block < n_compare;
if(bl) {
printf("constraint - comparisons (%d) are more than max number of threads(%d) -> exit",
n_compare, max_threads_block);
exit(1);
}
printf ("max num const: %d\n", binary_max_num_const);
int blk_c = max_num_const;
int blk_v = max_threads_block / n_compare;
dim3 block_solve(blk_v, blk_c, blk_c);
int gl = num_var / blk_v;
int grd_x = (gl)? gl : 1;
dim3 grid_solve(grd_x, 1, 1);
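// Worked example of the launch geometry (illustration only): if max_num_const == 8 (already a
// power of two) then binary_max_num_const == 8, n_compare == 64 and blk_v == 512/64 == 8, giving
// block_solve = (8, 8, 8) = 512 threads and grid_solve.x = num_var/8. Each block thus covers
// blk_v variables, presumably letting the y and z thread indices enumerate constraint pairs per
// variable (inferred from the block shape; the kernel body is in gstm_kernel.h).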
// this is for collecting sum of varnums in each block.
// we'll check if one of them are not zero -> iteration needed.
int* gsum_varnum_grid;
size_t sz_varnum_grid = sizeof(int) * grid_solve.x;
CudaSafeCall(cudaMalloc(&gsum_varnum_grid, sz_varnum_grid));
CudaSafeCall(cudaMemset(gsum_varnum_grid, 0, sz_varnum_grid));
// Shared memory size definition // for first kernel (solve_constraints)
size_t shd_solve = 2 * num_var * sizeof(int); // as big as to hold 2 arrays : lock_const, lock_asis
// Shared memory size definition // for second kernel (or_varnum_grid)
// for each block, we compute or_varnum_grid_kernel for the block
// gather to global memory, apply(or_varnum_grid_kernel) again until only one elt left in global.
//size_t shd_varnum = block_solve.x * sizeof(int);
int* g_sum_varnum;
CudaSafeCall(cudaMalloc(&g_sum_varnum, sizeof(int)));
CudaSafeCall(cudaMemset(g_sum_varnum, 0, sizeof(int)));
int cnt=0;
while(!is_const_empty) {
CudaSafeCall(cudaMemcpy(g_is_const_empty, &is_const_empty, sizeof(int), cudaMemcpyHostToDevice));
// new constraint - pair
CudaSafeCall(cudaMalloc(&g_lock_new_const, sz_varnums));
CudaSafeCall(cudaMemset(g_lock_new_const, 0, sz_varnums));
CudaSafeCall(cudaMalloc(&g_varnum_new_const, sz_varnums));
CudaSafeCall(cudaMemset(g_varnum_new_const, 0, sz_varnums));
CudaSafeCall(cudaMalloc(&g_new_constm, sz_mat));
CudaSafeCall(cudaMemset(g_new_constm, 0, sz_mat));
// testing sensor insertion
int sensor = 101;
int* g_sensor;
CudaSafeCall(cudaMalloc(&g_sensor, sizeof(int)));
CudaSafeCall(cudaMemcpy(g_sensor, &sensor, sizeof(int), cudaMemcpyHostToDevice));
// // test for thread assignment
// dim3 g(1,1);
// dim3 b(1,1,1);
// printf("is constraint empty? = %d\n", is_const_empty);
// printf("grid x y = %d, %d\n", grid_solve.x, grid_solve.y);
// printf("block x y z = %d, %d, %d\n", block_solve.x, block_solve.y, block_solve.z);
// g_const_sample lets us peek at one constraint from inside the kernel for debugging.
int* g_const_sample;
CudaSafeCall(cudaMalloc(&g_const_sample, sz_a_const));
// kernel
solve_constraints_kernel<<<grid_solve, block_solve, shd_solve>>>(
num_var, max_num_const, sz_a_const,
g_varnum_refl, g_reflection,
g_varnum_const, g_constm,
g_lock_asis, g_varnum_asis, g_asis,
g_lock_new_const, g_varnum_new_const, g_new_constm, g_sensor, g_const_sample);
sensor = 99;
CudaSafeCall(cudaMemcpy(&sensor, g_sensor, sizeof(int), cudaMemcpyDeviceToHost));
printf("sensor = %d\n", sensor);
byte* sample = (byte*)malloc(sz_a_const);
CudaSafeCall(cudaMemcpy(sample, g_const_sample, sz_a_const, cudaMemcpyDeviceToHost));
print_a_constraint(sample, var_const_max, is_uniform_var_width);
// check emptiness of constraints by summing all varnums and deriving is_const_empty
printf("in while empty? = %d\n", is_const_empty);
int* tmpvarnum = (int*)malloc(sz_varnums);
CudaSafeCall(cudaMemcpy(tmpvarnum, g_varnum_new_const, sz_varnums, cudaMemcpyDeviceToHost));
for(i=0;i<num_var;i++)
printf("%d - varnum const [%d] = %d\n", cnt, i, tmpvarnum[i]);
cnt++;
int* gnum_var;
CudaSafeCall(cudaMalloc(&gnum_var, sizeof(int)));
CudaSafeCall(cudaMemcpy(gnum_var, &num_var, sizeof(int), cudaMemcpyHostToDevice));
g_sum_varnum = sum_gpu_kernel_int32 (num_var, gnum_var, g_varnum_new_const);
int* sum_varnum = (int*) malloc(sizeof(int));
CudaSafeCall(cudaMemcpy(sum_varnum, g_sum_varnum, sizeof(int), cudaMemcpyDeviceToHost));
printf("sum of all varnum_const = %d\n", *sum_varnum);
is_const_empty = 1;//(*sum_varnum == 0) ? 1 : 0; // sum=0 -> empty const -> is_const_empty = 1.
}
/*
cutStopTimer(timerg_exe);
cutStopTimer(timerg_all);
*/
printf("end of kernel invoke\n");
// test g_varnum_const, g_varnum_asis
int* varnum_const = (int*)malloc(sz_varnums);
memset(varnum_const, 0, sz_varnums);
CudaSafeCall(cudaMemcpy(varnum_const, g_varnum_const, sz_varnums, cudaMemcpyDeviceToHost));
byte* h_constm = (byte*) malloc(sz_mat);
CudaSafeCall(cudaMemcpy(h_constm, g_constm, sz_mat, cudaMemcpyDeviceToHost));
int* varnum_asis = (int*)malloc(sz_varnums);
memset(varnum_asis, 0, sz_varnums);
CudaSafeCall(cudaMemcpy(varnum_asis, g_varnum_asis, sz_varnums, cudaMemcpyDeviceToHost));
byte* h_asis = (byte*) malloc(sz_mat);
CudaSafeCall(cudaMemcpy(h_asis, g_asis, sz_mat, cudaMemcpyDeviceToHost));
printf("numvar:%d\n", num_var);
for(i=0;i<num_var;i+=1) {
printf("init -- var[%d], numconst:%d, num_ais:%d\n", i, varnum_const[i], varnum_asis[i]);
}
int* varnum_new_const = (int*)malloc(sz_varnums);
memset(varnum_new_const, 0, sz_varnums);
CudaSafeCall(cudaMemcpy(varnum_new_const, g_varnum_new_const, sz_varnums, cudaMemcpyDeviceToHost));
for(i=0;i<num_var;i+=1) {
printf("after exec -- var[%d], numconst:%d, num_ais:%d\n", i, varnum_new_const[i], varnum_asis[i]);
}
/*
double tvg = cutGetTimerValue(timerg_exe);
printf("gpu time glb(kernel):\n %e \n(%f)(ms)\n", tvg, tvg);
cutDeleteTimer(timerg_exe);
double tvga = cutGetTimerValue(timerg_all);
printf("gpu time glb(kernel+in-copy):\n %e \n(%f)(ms)\n", tvga, tvga);
cutDeleteTimer(timerg_all);
*/
// Stop event
cudaEventRecord(all_stop, 0);
cudaEventSynchronize(all_stop);
float all_elapsedTime;
cudaEventElapsedTime(&all_elapsedTime, all_start, all_stop); // that's our time!
// Clean up:
cudaEventDestroy(all_start);
cudaEventDestroy(all_stop);
printf("gpu time glb(all):\n %e \n(%f)(ms)\n", all_elapsedTime, all_elapsedTime);
// printf("printing - constraints ...\n");
// print_constraints_gpu(num_var, sz_a_const, varnum_const, h_constm, is_uniform_var_width);
// printf("printing - asis ...\n");
// print_constraints_gpu(num_var, sz_a_const, varnum_asis, h_asis, is_uniform_var_width);
return;
}
extern "C" void sba_solve_stm(SbaStream* ss_in, int num_var, size_t var_const_max, SbaStream* ss_out_analysis,
int is_uniform_var_width, int is_equal_const_size)
{
// memalloc/memcpy for ss_in, ss_outconstraints, ss_out_analysis
// ss_in : host
SbaStream* ssg_in; //device
sbastream_alloc_copy(num_var, var_const_max, ss_in, &ssg_in, cudaMemcpyHostToDevice);
printf("ssg_in\n");
// printf("test-----------------------------------------------------------------------------\n");
// SbaStream* ssh_in;
// sbastream_alloc_copy(num_var, var_const_max, ssg_in, &ssh_in, cudaMemcpyDeviceToHost);
// print_constraint_stream(num_var, var_const_max, ssh_in, is_uniform_var_width, is_equal_const_size);
//
// printf("alloc constraint/analysis--------------------------------------------------------\n");
// last arg 1 means copying at gpu
SbaStream* ss_dfields = (SbaStream*)malloc(sizeof(SbaStream));
CudaSafeCall(cudaMemcpy(ss_dfields, ssg_in, sizeof(SbaStream), cudaMemcpyDeviceToHost));
int* ssg_cst_num_var;
byte** ssg_cst_constnames;
byte** ssg_cst_constm;
ssg_cst_num_var = init_from_num_const(num_var, 0, GPU);
ssg_cst_constnames = init_from_constnames(num_var, ss_dfields->num_const, '\0', GPU);
ssg_cst_constm = init_from_constm(num_var, ss_dfields->num_const, var_const_max, '\0', GPU, is_uniform_var_width);
int* ssg_anlys_num_var;
byte** ssg_anlys_constnames;
byte** ssg_anlys_constm;
ssg_anlys_num_var = init_from_num_const(num_var, 0, GPU);
ssg_anlys_constnames = init_from_constnames(num_var, ss_dfields->num_const, '\0', GPU);
ssg_anlys_constm = init_from_constm(num_var, ss_dfields->num_const, var_const_max, '\0', GPU, is_uniform_var_width);
/* Sbastream from given SbaStream at GPU is not dealt as it is, instead each field are created independently.
// ss_out_constraints : host
SbaStream* ssg_out_constraints; // ssg_out_constraints
sbastream_alloc_copy(num_var, var_const_max, ss_out_constraints, &ssg_out_constraints, cudaMemcpyHostToDevice);
printf("ssg_out\n");
// ss_out_analysis : host
SbaStream* ssg_out_analysis; // device
sbastream_alloc_copy(num_var, var_const_max, ss_out_analysis, &ssg_out_analysis, cudaMemcpyHostToDevice);
printf("ssg_out_analysis\n");
*/
int max_const_size = max_num_constraints(num_var, ss_in->num_const);
dim3 threads(num_var, max_const_size);
dim3 grid(1);
init_constraints_kernel_stm<<< grid, threads >>>(
ss_dfields->num_const, ss_dfields->constnames, ss_dfields->constm,
num_var, var_const_max,
ssg_cst_num_var, ssg_cst_constnames, ssg_cst_constm,
ssg_anlys_num_var, ssg_anlys_constnames, ssg_anlys_constm,
is_uniform_var_width, is_equal_const_size);
printf("-------original const---------\n");
/* print_constraint_stream(num_var, var_const_max, ss_in, is_uniform_var_width, is_equal_const_size);
SbaStream *ss_reflection = copy_from_SbaStream(num_var, var_const_max, ss_out_constraints);
printf("-------reflection---------\n");
print_constraint_stream(num_var, var_const_max, ss_reflection, is_uniform_var_width, is_equal_const_size);
printf("-------pure const---------\n");
print_constraint_stream(num_var, var_const_max, ss_out_constraints, is_uniform_var_width, is_equal_const_size);
printf("-------analysis-init--------\n");
print_constraint_stream(num_var, var_const_max, ss_out_analysis, is_uniform_var_width, is_equal_const_size);
// check if constraint stream is empty.
int empty_const = is_constraintstream_empty(num_var, var_const_max, ss_out_constraints);
printf("constraint is empty? %d\n", empty_const);
int while_iter_count = 0;
while(!empty_const)
{
int i;
byte* access_lock = (byte*) malloc (sizeof (byte) * num_var);
for(i=0;i<num_var;i++) {
access_lock[i] = '\0'; // lock deactivated -> okey to update.
// lock activated if '\1'
//printf("lock[%d] = %c\n", i, access_lock[i]);
}
SbaStream *ss_out_new_constraints = SbaStream_init_empty(num_var);
//init_constraints_kernel<<grid, threads>>();
printf("%dth Iteration\n------------------------------------------------\nGiven ss_out_costraints:\n", while_iter_count);
print_constraint_stream(num_var, var_const_max, ss_out_constraints, is_uniform_var_width, is_equal_const_size);
print_constraint_stream(num_var, var_const_max, ss_out_analysis, is_uniform_var_width, is_equal_const_size);
//iterate for all var / constraints(var).-> fill up ss_out_new_constraints.
for(grid=0;grid < num_var;grid++) {
printf("-------------------------------------------\nVarNO:%d, NUMber of constraint:%d, w/ thread:%d\n",
grid, ss_out_constraints->num_const[grid], threads);
for(threads=0;threads < ss_out_constraints->num_const[grid];threads++) {
solve_constraints_kernel(ss_reflection, ss_out_constraints, num_var, access_lock,
ss_out_new_constraints, ss_out_analysis, grid, threads,
var_const_max, &empty_const);
}
}
//printf("------------------------------------------------\nNewly updated ss_out_costraints:");
//print_constraint_stream(num_var, var_const_max, ss_out_constraints, is_uniform_var_width, is_equal_const_size);
//printf("------------------------------------------------\nNewly updated ss_out_analysis:");
//print_constraint_stream(num_var, var_const_max, ss_out_analysis, is_uniform_var_width, is_equal_const_size);
ss_out_constraints = ss_out_new_constraints;
while_iter_count++;
empty_const = is_constraintstream_empty(num_var, var_const_max, ss_out_constraints);
printf("constraint is empty? or not? %d -> check below:\nFinal constraints:", empty_const);
//print_constraint_stream(num_var, var_const_max, ss_out_constraints);
}
printf("While loop ended:%d\n", empty_const);
printf("\nreflection printing\n");
print_constraint_stream(num_var, var_const_max, ss_reflection, is_uniform_var_width, is_equal_const_size);
//printf("\nout_constraint printing\n");
//print_constraint_stream(num_var, var_const_max, ss_out_constraints, is_uniform_var_width, is_equal_const_size);
printf("\nout_analysis printing\n");
print_constraint_stream(num_var, var_const_max, ss_out_analysis, is_uniform_var_width, is_equal_const_size);
printf ("Total iteration of kernel:%d\n", while_iter_count);
*/
}
extern "C" int ffi_tester (int x)
{
int i;
for(i=0;i<x;i++) {
printf("ffi on testing : %d\n", i);
}
return 0;
}
|
8e5b8c041a9de3d2fde768e49ec2d18a318b5780.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/ztrtri_upper_batched.cu, normal z -> d, Sun Nov 20 20:20:30 2016
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
@author Ahmad Abdelfattah
This file implements upper case, and is called by dtrtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "magma_internal.h"
#define TRTRI_BATCHED
#include "dtrtri.cuh"
#include "dtrtri_upper_device.cuh"
/******************************************************************************/
__global__ void
dtrtri_diag_upper_kernel_batched(
magma_diag_t diag, int n, double const * const * dA_array, int lda, double **dinvA_array)
{
int batchid = blockIdx.z;
dtrtri_diag_upper_device(diag, n, dA_array[batchid], lda, dinvA_array[batchid]);
}
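// Note: blockIdx.z selects the matrix within the batch, so the host-side launcher
// (magmablas_dtrtri_diag_batched, defined elsewhere) presumably uses a 3D grid whose z
// dimension equals batchCount, e.g. something like dim3 grid(magma_ceildiv(n, IB), 1, batchCount);
// this is an assumption based on the indexing here, not a quote of the launcher.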
/******************************************************************************/
__global__ void
triple_dgemm16_part1_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm16_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm16_part2_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm16_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm32_part1_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm32_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm32_part2_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm32_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm64_part1_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm64_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm64_part2_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm64_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm_above64_part1_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm_above64_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm_above64_part2_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm_above64_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm_above64_part3_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm_above64_part3_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
// =============================================================================
// vbatched kernels
/******************************************************************************/
__global__ void
dtrtri_diag_upper_kernel_vbatched(
magma_diag_t diag, magma_int_t* n, double const * const * dA_array, magma_int_t* lda, double **dinvA_array)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
if(blockIdx.x >= magma_ceildiv(my_n, IB)) return;
dtrtri_diag_upper_device(diag, my_n, dA_array[batchid], (int)lda[batchid], dinvA_array[batchid]);
}
// The kernels below have 3D grids
// grid.x and grid.z are independent of my_n
// only grid.y depends on my_n, so terminating thread blocks is based on blockIdx.y
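// Editorial sketch (not part of the MAGMA source): the host wrapper is assumed to size
// grid.y for the largest n in the batch and let smaller entries exit early through the
// blockIdx.y checks below. Hypothetically (h_n, batchCount, grid_x are assumed names):
//
//   magma_int_t max_n = 0;
//   for (magma_int_t i = 0; i < batchCount; ++i) max_n = max(max_n, h_n[i]);
//   const magma_int_t max_npages = magma_ceildiv(max_n, jb*2);
//   dim3 grid(grid_x /* n-independent */, max_npages*(jb/16), batchCount);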
/******************************************************************************/
__global__ void
triple_dgemm16_part1_upper_kernel_vbatched(
magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_dgemm16_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_dgemm16_part2_upper_kernel_vbatched(
magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_dgemm16_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_dgemm32_part1_upper_kernel_vbatched(
magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_dgemm32_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_dgemm32_part2_upper_kernel_vbatched(
magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_dgemm32_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_dgemm64_part1_upper_kernel_vbatched(
magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_dgemm64_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_dgemm64_part2_upper_kernel_vbatched(
magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_dgemm64_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_dgemm_above64_part1_upper_kernel_vbatched(
magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_dgemm_above64_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_dgemm_above64_part2_upper_kernel_vbatched(
magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_dgemm_above64_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_dgemm_above64_part3_upper_kernel_vbatched(
magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_dgemm_above64_part3_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
| 8e5b8c041a9de3d2fde768e49ec2d18a318b5780.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/ztrtri_upper_batched.cu, normal z -> d, Sun Nov 20 20:20:30 2016
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
@author Ahmad Abdelfattah
This file implements upper case, and is called by dtrtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "magma_internal.h"
#define TRTRI_BATCHED
#include "dtrtri.cuh"
#include "dtrtri_upper_device.cuh"
/******************************************************************************/
__global__ void
dtrtri_diag_upper_kernel_batched(
magma_diag_t diag, int n, double const * const * dA_array, int lda, double **dinvA_array)
{
int batchid = blockIdx.z;
dtrtri_diag_upper_device(diag, n, dA_array[batchid], lda, dinvA_array[batchid]);
}
/******************************************************************************/
__global__ void
triple_dgemm16_part1_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm16_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm16_part2_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm16_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm32_part1_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm32_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm32_part2_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm32_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm64_part1_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm64_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm64_part2_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm64_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm_above64_part1_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm_above64_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm_above64_part2_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm_above64_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm_above64_part3_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm_above64_part3_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
// =============================================================================
// vbatched kernels
/******************************************************************************/
__global__ void
dtrtri_diag_upper_kernel_vbatched(
magma_diag_t diag, magma_int_t* n, double const * const * dA_array, magma_int_t* lda, double **dinvA_array)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
if(blockIdx.x >= magma_ceildiv(my_n, IB)) return;
dtrtri_diag_upper_device(diag, my_n, dA_array[batchid], (int)lda[batchid], dinvA_array[batchid]);
}
// The kernels below have 3D grids
// grid.x and grid.z are independent of my_n
// only grid.y depends on my_n, so terminating thread blocks is based on blockIdx.y
/******************************************************************************/
__global__ void
triple_dgemm16_part1_upper_kernel_vbatched(
magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_dgemm16_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_dgemm16_part2_upper_kernel_vbatched(
magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_dgemm16_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_dgemm32_part1_upper_kernel_vbatched(
magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_dgemm32_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_dgemm32_part2_upper_kernel_vbatched(
magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_dgemm32_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_dgemm64_part1_upper_kernel_vbatched(
magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_dgemm64_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_dgemm64_part2_upper_kernel_vbatched(
magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_dgemm64_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_dgemm_above64_part1_upper_kernel_vbatched(
magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_dgemm_above64_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_dgemm_above64_part2_upper_kernel_vbatched(
magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_dgemm_above64_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_dgemm_above64_part3_upper_kernel_vbatched(
magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_dgemm_above64_part3_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
|
6eec0db619ba5742a24ce793e903f2130f8d0f0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/reshape.hpp>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T>
__global__ void kernel_reshape_forward(const int num, T *y, const T *x) {
NBLA_CUDA_KERNEL_LOOP(idx, num) { y[idx] = x[idx]; }
}
template <typename T, bool accum = true>
__global__ void kernel_reshape_backward(const int num, T *dx, const T *dy) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
if (accum) {
dx[idx] += dy[idx];
} else {
dx[idx] = dy[idx];
}
}
}
template <typename T>
void ReshapeCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
if (this->inplace_) {
return;
}
cuda_set_device(this->device_);
const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_);
Tcu *y = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true);
size_t size = inputs[0]->size();
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_reshape_forward, size, y, x);
}
template <typename T>
void ReshapeCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!(propagate_down[0])) {
return;
}
cuda_set_device(this->device_);
Tcu *dx = inputs[0]->cast_grad_and_get_pointer<Tcu>(
this->ctx_, !(this->inplace_ || accum[0]));
const Tcu *dy = outputs[0]->get_grad_pointer<Tcu>(this->ctx_);
size_t size = inputs[0]->size();
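  // Editorial note: when dx and dy alias (in-place reshape), accumulating would
  // double-count the gradient, so the plain-copy kernel is used; accumulation is
  // only selected when the buffers differ and accum[0] is requested.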
if (dx != dy && accum[0]) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_reshape_backward<Tcu, true>), size,
dx, dy);
} else {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_reshape_backward<Tcu, false>), size,
dx, dy);
}
}
}
| 6eec0db619ba5742a24ce793e903f2130f8d0f0d.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/reshape.hpp>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T>
__global__ void kernel_reshape_forward(const int num, T *y, const T *x) {
NBLA_CUDA_KERNEL_LOOP(idx, num) { y[idx] = x[idx]; }
}
template <typename T, bool accum = true>
__global__ void kernel_reshape_backward(const int num, T *dx, const T *dy) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
if (accum) {
dx[idx] += dy[idx];
} else {
dx[idx] = dy[idx];
}
}
}
template <typename T>
void ReshapeCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
if (this->inplace_) {
return;
}
cuda_set_device(this->device_);
const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_);
Tcu *y = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true);
size_t size = inputs[0]->size();
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_reshape_forward, size, y, x);
}
template <typename T>
void ReshapeCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!(propagate_down[0])) {
return;
}
cuda_set_device(this->device_);
Tcu *dx = inputs[0]->cast_grad_and_get_pointer<Tcu>(
this->ctx_, !(this->inplace_ || accum[0]));
const Tcu *dy = outputs[0]->get_grad_pointer<Tcu>(this->ctx_);
size_t size = inputs[0]->size();
if (dx != dy && accum[0]) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_reshape_backward<Tcu, true>), size,
dx, dy);
} else {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_reshape_backward<Tcu, false>), size,
dx, dy);
}
}
}
|
74936733963d5fb965495da83dfec962298692e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
__global__ void occupancy_test(int * results)
{
int gid = blockDim.x * blockIdx.x + threadIdx.x;
int x1 = 1;
int x2 = 2;
int x3 = 3;
int x4 = 4;
int x5 = 5;
int x6 = 6;
int x7 = 7;
int x8 = 8;
results[gid] = x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 ;
}
int main()
{
int size = 1 << 16;
int byte_size = sizeof(int)*size;
int * d_results;
hipMalloc((void**)&d_results, byte_size);
hipMemset(d_results, 0, byte_size);
dim3 blocks(128);
dim3 grid((size+blocks.x-1)/blocks.x);
  hipLaunchKernelGGL(( occupancy_test), dim3(grid), dim3(blocks), 0, 0, d_results);
hipDeviceSynchronize();
return 0;
}
| 74936733963d5fb965495da83dfec962298692e9.cu | #include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
__global__ void occupancy_test(int * results)
{
int gid = blockDim.x * blockIdx.x + threadIdx.x;
int x1 = 1;
int x2 = 2;
int x3 = 3;
int x4 = 4;
int x5 = 5;
int x6 = 6;
int x7 = 7;
int x8 = 8;
results[gid] = x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 ;
}
int main()
{
int size = 1 << 16;
int byte_size = sizeof(int)*size;
int * d_results;
cudaMalloc((void**)&d_results, byte_size);
cudaMemset(d_results, 0, byte_size);
dim3 blocks(128);
dim3 grid((size+blocks.x-1)/blocks.x);
  occupancy_test <<<grid, blocks>>> (d_results);
cudaDeviceSynchronize();
return 0;
}
|
8bf744da6a4a3556240c447ba393a79acb30261f.hip | // !!! This is a file automatically generated by hipify!!!
#include "../THCTensorMathPointwise.cuh"
#include "THHTensor.hpp"
#include "THHStream.h"
#include "../generic/THCTensorMathPointwise.cu"
#include "../THCGenerateHalfType.h"
| 8bf744da6a4a3556240c447ba393a79acb30261f.cu | #include "../THCTensorMathPointwise.cuh"
#include "THCTensor.hpp"
#include "THCStream.h"
#include "../generic/THCTensorMathPointwise.cu"
#include "../THCGenerateHalfType.h"
|
bfec0b4eb1279e6d9308e8480ec383030b98eb39.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| bfec0b4eb1279e6d9308e8480ec383030b98eb39.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
78394f1236b81857c8ac24d06d907d4c880dc5bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
{
__global__ void Sobel(const unsigned char* input, unsigned int* output, const unsigned int width, const unsigned int height)
{
const int col = blockIdx.x * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col > width-1 || row > height-1 || row == 0 || col == 0)
{
return;
}
const int index = col + row * width;
double dx = input[index-width+1] + 2 * input[index+1] + input[index+width+1] - input[index-width-1] - 2 * input[index-1] - input[index+width-1];
double dy = input[index+width-1] + 2 * input[index+width] + input[index+width+1] - input[index-width-1] - 2 * input[index-width] - input[index-width+1];
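    // Editorial note: for 8-bit input, |dx| and |dy| are each at most 4*255 = 1020, so the
    // true maximum magnitude is 1020*sqrt(2) ~= 1442. The constant 1141 below is the
    // original author's normalisation choice; magnitudes above 1141 would map past 255.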
int magnitude = (int)( (sqrt(dx * dx + dy * dy)*255)/1141 );
output[index] = magnitude;
}
} | 78394f1236b81857c8ac24d06d907d4c880dc5bf.cu | extern "C"
{
__global__ void Sobel(const unsigned char* input, unsigned int* output, const unsigned int width, const unsigned int height)
{
const int col = blockIdx.x * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col > width-1 || row > height-1 || row == 0 || col == 0)
{
return;
}
const int index = col + row * width;
double dx = input[index-width+1] + 2 * input[index+1] + input[index+width+1] - input[index-width-1] - 2 * input[index-1] - input[index+width-1];
double dy = input[index+width-1] + 2 * input[index+width] + input[index+width+1] - input[index-width-1] - 2 * input[index-width] - input[index-width+1];
int magnitude = (int)( (sqrt(dx * dx + dy * dy)*255)/1141 );
output[index] = magnitude;
}
} |
062ef608455058975c443129c21ddaef7ac9b1ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/*
 * Initializes vector elements: each element is simply set to
 * the value `num`. The original host-side loop is kept below in
 * a comment; a grid-stride device kernel is used instead.
*/
/*
void initWith(float num, float *a, int N)
{
for(int i = 0; i < N; ++i)
{
a[i] = num;
}
}
*/
__global__ void initWith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
a[i] = num;
}
}
/*
* Device kernel stores into `result` the sum of each
* same-indexed value of `a` and `b`.
*/
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
/*
* Host function to confirm values in `vector`. This function
* assumes all values are the same `target` value.
*/
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
hipMallocManaged(&a, size);
hipMallocManaged(&b, size);
hipMallocManaged(&c, size);
int deviceId;
hipGetDevice(&deviceId);
hipDeviceProp_t props;
hipGetDeviceProperties(&props, deviceId);
int multiProcessorCount = props.multiProcessorCount;
int warpSize = props.warpSize;
//initWith(3, a, N);
//initWith(4, b, N);
//initWith(0, c, N);
size_t threadsPerBlock;
size_t numberOfBlocks;
/*
* nvprof should register performance changes when execution configuration
* is updated.
*/
threadsPerBlock = warpSize;
numberOfBlocks = multiProcessorCount;
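  /*
   * Editorial sketch (not in the original): warpSize * multiProcessorCount is a
   * deliberately device-derived configuration. A common alternative that covers N
   * directly would be:
   *
   *   threadsPerBlock = 256;
   *   numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
   *
   * The grid-stride loops in the kernels make either choice produce correct results.
   */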
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, 3, a, N);
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, 4, b, N);
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, 0, c, N);
hipDeviceSynchronize();
hipError_t addVectorsErr;
hipError_t asyncErr;
hipLaunchKernelGGL(( addVectorsInto), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, c, a, b, N);
addVectorsErr = hipGetLastError();
if(addVectorsErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addVectorsErr));
asyncErr = hipDeviceSynchronize();
if(asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr));
checkElementsAre(7, c, N);
hipFree(a);
hipFree(b);
hipFree(c);
}
| 062ef608455058975c443129c21ddaef7ac9b1ea.cu | #include <stdio.h>
/*
 * Initializes vector elements: each element is simply set to
 * the value `num`. The original host-side loop is kept below in
 * a comment; a grid-stride device kernel is used instead.
*/
/*
void initWith(float num, float *a, int N)
{
for(int i = 0; i < N; ++i)
{
a[i] = num;
}
}
*/
__global__ void initWith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
a[i] = num;
}
}
/*
* Device kernel stores into `result` the sum of each
* same-indexed value of `a` and `b`.
*/
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
/*
* Host function to confirm values in `vector`. This function
* assumes all values are the same `target` value.
*/
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
cudaMallocManaged(&a, size);
cudaMallocManaged(&b, size);
cudaMallocManaged(&c, size);
int deviceId;
cudaGetDevice(&deviceId);
cudaDeviceProp props;
cudaGetDeviceProperties(&props, deviceId);
int multiProcessorCount = props.multiProcessorCount;
int warpSize = props.warpSize;
//initWith(3, a, N);
//initWith(4, b, N);
//initWith(0, c, N);
size_t threadsPerBlock;
size_t numberOfBlocks;
/*
* nvprof should register performance changes when execution configuration
* is updated.
*/
threadsPerBlock = warpSize;
numberOfBlocks = multiProcessorCount;
initWith<<<numberOfBlocks, threadsPerBlock>>>(3, a, N);
initWith<<<numberOfBlocks, threadsPerBlock>>>(4, b, N);
initWith<<<numberOfBlocks, threadsPerBlock>>>(0, c, N);
cudaDeviceSynchronize();
cudaError_t addVectorsErr;
cudaError_t asyncErr;
addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N);
addVectorsErr = cudaGetLastError();
if(addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr));
asyncErr = cudaDeviceSynchronize();
if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));
checkElementsAre(7, c, N);
cudaFree(a);
cudaFree(b);
cudaFree(c);
}
|
5ea6f51afb546ea7f2f8ac15cc4823770055fb2a.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::device_vector<int> dev_idata(idata, idata + n);
thrust::device_vector<int> dev_odata(odata, odata + n);
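            // Editorial note: thrust::device_vector<int> dev_odata(n) would be enough here,
            // since the initial host contents of odata are overwritten by the scan anyway.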
timer().startGpuTimer();
thrust::exclusive_scan(dev_idata.begin(), dev_idata.end(), dev_odata.begin());
            // uses `thrust::exclusive_scan` (called above);
            // for device_vectors dv_in and dv_out the pattern is:
            // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
timer().endGpuTimer();
thrust::copy(dev_odata.begin(), dev_odata.end(), odata);
}
}
}
| 5ea6f51afb546ea7f2f8ac15cc4823770055fb2a.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::device_vector<int> dev_idata(idata, idata + n);
thrust::device_vector<int> dev_odata(odata, odata + n);
timer().startGpuTimer();
thrust::exclusive_scan(dev_idata.begin(), dev_idata.end(), dev_odata.begin());
            // uses `thrust::exclusive_scan` (called above);
            // for device_vectors dv_in and dv_out the pattern is:
            // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
timer().endGpuTimer();
thrust::copy(dev_odata.begin(), dev_odata.end(), odata);
}
}
}
|
e70847914326740a6236a865aad95a7f62545232.hip | // !!! This is a file automatically generated by hipify!!!
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <helpers/DebugHelper.h>
#include <helpers/TAD.h>
#include <helpers/shape.h>
#include <loops/summarystatsreduce.h>
#include <ops/specials_cuda.h>
#include <system/Environment.h>
#include <system/op_boilerplate.h>
#include <types/float16.h>
#include <types/types.h>
using namespace simdOps;
namespace functions {
namespace summarystats {
template <typename X, typename Z>
void SD_KERNEL summaryStatsReduceT(int op, void const* dx, sd::LongType const* xShapeInfo, sd::LongType xRank, void* extraParams,
void* z, sd::LongType const* zShapeInfo, sd::LongType zRank,
sd::LongType* dimension, long long int dimensionLength, int postProcessOrNot, bool biasCorrected, sd::LongType* allocationBuffer,
void* reductionBuffer, sd::LongType const* tadOnlyShapeInfo,
sd::LongType const* tadOffsets) {
functions::summarystats::SummaryStatsReduce<X, Z>::transform(
op, dx, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, biasCorrected, allocationBuffer,
reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
/**
*
* @param sPartialsRef
* @param tid
* @param extraParams
*/
template <typename X, typename Z>
template <typename OpType>
SD_DEVICE void SummaryStatsReduce<X, Z>::aggregatePartials(SummaryStatsData<X>* sPartials, sd::LongType tid,
sd::LongType numElements, void* vextraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
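  // Worked example (editorial): for blockDim.x = 96, floorPow2 becomes 64; threads 64..95
  // first fold their partials into slots 0..31, and the tree reduction below then halves
  // the active range over 32, 16, 8, 4, 2, 1.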
auto extraParams = static_cast<Z*>(vextraParams);
sd::LongType floorPow2 = blockDim.x;
if (floorPow2 & (floorPow2 - 1)) {
while (floorPow2 & (floorPow2 - 1)) {
floorPow2 &= floorPow2 - 1;
}
if (tid >= floorPow2) {
SummaryStatsData<X> prev = sPartials[tid - floorPow2];
SummaryStatsData<X> curr = sPartials[tid];
sPartials[tid - floorPow2] = update(prev, curr, extraParams);
}
__syncthreads();
}
for (sd::LongType activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numElements) {
SummaryStatsData<X> curr = sPartials[tid];
SummaryStatsData<X> next = sPartials[tid + activeThreads];
sPartials[tid] = update(curr, next, extraParams);
}
__syncthreads();
}
};
/**
* @param n n is the number of
* elements to loop through
* @param dx the data to operate on
* @param xVectorInfo the meta data for the vector:
* 0 is the offset
* 1 is the increment/stride
* 2 is the real length of the buffer (n and dx.length won't always be the same)
* 3 is the element wise stride for the buffer
* 4 is the number of elements it takes to get to the next row/column/tensor
* @param gpuInformation
* 0 is the block size
* 1 is the grid size
* 2 is the shared memory size
* @param problemDefinition
* 0 is the number of elements per vector
* 1 is the number of vectors
*/
template <typename X, typename Z>
template <typename OpType>
SD_DEVICE void SummaryStatsReduce<X, Z>::transform(void const* vx, sd::LongType const* xShapeInfo, void* vextraParams,
void* vz, sd::LongType const* zShapeInfo, sd::LongType* dimension,
sd::LongType dimensionLength, int postProcessOrNot,
sd::LongType* allocationBuffer,
void* vreductionBuffer, sd::LongType const* tadOnlyShapeInfo,
sd::LongType const* tadOffsets) {
auto dx = static_cast<X const*>(vx);
auto z = static_cast<Z*>(vz);
auto extraParams = static_cast<Z*>(vextraParams);
auto reductionBuffer = static_cast<Z*>(vreductionBuffer);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ volatile int resultScalar;
__shared__ int xElementWiseStride;
int numElements = blockDim.x;
// shared memory space for storing intermediate results
__shared__ SummaryStatsData<X> sPartials[SD_CUDA_BLOCK_SIZE];
Z startingVal = startingValue(dx);
SummaryStatsData<X> val;
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
// length for the tad
__shared__ volatile int xLength;
__shared__ volatile int resultLength;
SummaryStatsData<X> reduction;
reduction.initWithValue(0.0);
reduction.n = 0;
if (threadIdx.x == 0) {
if (zShapeInfo != nullptr)
resultLength = shape::length(zShapeInfo);
else
resultLength = 1;
if (dimensionLength == 1) {
if (resultLength == 1 && (dimension == nullptr || dimension[0] == SD_MAX_DIMENSION))
resultScalar = 1;
else
resultScalar = 0;
} else
resultScalar = 0;
if (resultLength == 1) resultScalar = 1;
auto xStride = shape::stride(xShapeInfo);
auto xOrder = shape::order(xShapeInfo);
if (dimension != nullptr && (dimension[0] != SD_MAX_DIMENSION && dimensionLength == 1)) {
xElementWiseStride = xStride[dimension[0]];
} else {
xElementWiseStride = shape::elementWiseStride(xShapeInfo);
}
xLength = shape::length(xShapeInfo);
}
__syncthreads();
if (!resultScalar) {
__shared__ int tadLength;
__shared__ int tadEWS;
__shared__ int numTads;
if (threadIdx.x == 0) {
tadLength = shape::length(tadOnlyShapeInfo);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
}
__syncthreads();
if (tadEWS == 0) {
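      // Editorial note: tadEWS == 0 means the TAD has no uniform element-wise stride, so
      // each offset must be resolved with shape::getIndexOffset; the else-branch below
      // takes the faster strided path when a single stride exists.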
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
auto tadOffsetForBlock = tadOffsets[r];
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {
auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo);
SummaryStatsData<X> indexVal2;
indexVal2.initWithValue(dx[xOffset]);
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(blockDim.x, tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[r] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]);
}
__syncthreads();
}
} else {
for (int i = blockIdx.x; i < numTads; i += gridDim.x) {
auto tadOffsetForBlock = tadOffsets[i];
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
for (int x = threadIdx.x; x < tadLength; x += blockDim.x) {
auto indexX = tadOffsetForBlock + x * tadEWS;
SummaryStatsData<X> indexVal2;
indexVal2.initWithValue(dx[indexX]);
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(blockDim.x, tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[i] = OpType::getValue(postProcessOrNot,
sPartials[threadIdx.x]); // postProcess(sPartials[0],tadLength ,extraParams);
}
}
}
} else if (resultScalar) {
__shared__ int n;
if (threadIdx.x == 0) {
xElementWiseStride = shape::elementWiseStride(xShapeInfo);
n = shape::length(xShapeInfo);
}
__syncthreads();
if (xElementWiseStride >= 1) {
for (sd::LongType i = tid; i < n; i += (blockDim.x * gridDim.x)) {
SummaryStatsData<X> indexVal2;
indexVal2.initWithValue(dx[i * xElementWiseStride]);
reduction = update(reduction, indexVal2, extraParams);
}
} else {
for (sd::LongType i = tid; i < n; i += blockDim.x * gridDim.x) {
auto offset = shape::getIndexOffset(i, xShapeInfo);
SummaryStatsData<X> indexVal2;
indexVal2.initWithValue(dx[offset]);
reduction = update(reduction, indexVal2, extraParams);
}
}
sPartials[threadIdx.x] = reduction;
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, blockDim.x, extraParams);
__syncthreads();
if (gridDim.x > 1) {
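    // Editorial note: cross-block reduction. Each block publishes its partial result to
    // reductionBuffer, an atomic ticket counter (tc[16384]) identifies the last block to
    // finish, and that block folds all per-block partials into the final scalar z[0].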
__shared__ bool amLast;
unsigned int* tc = (unsigned int*)reductionBuffer;
tid = threadIdx.x;
if (threadIdx.x == 0) {
SummaryStatsData<X>* pBuffer = (SummaryStatsData<X>*)reductionBuffer;
pBuffer[blockIdx.x] = sPartials[0];
}
__threadfence();
__syncthreads();
if (tid == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
SummaryStatsData<X>* pBuffer = (SummaryStatsData<X>*)reductionBuffer;
Z startingVal = startingValue(dx);
SummaryStatsData<X> val;
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], pBuffer[i], extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, gridDim.x, extraParams);
__syncthreads();
if (tid == 0) {
z[0] = OpType::getValue(postProcessOrNot, sPartials[0]);
}
}
} else {
if (tid == 0) {
unsigned int* tc = (unsigned*)reductionBuffer;
tc[16384] = 0;
        z[0] = OpType::getValue(postProcessOrNot, sPartials[0]);
}
}
}
};
template <typename X, typename Y>
SD_DEVICE void SummaryStatsReduce<X, Y>::transform(const int opNum, void const* dx, sd::LongType const* xShapeInfo,
void* extraParams, void* z, sd::LongType const* zShapeInfo,
sd::LongType* dimension, sd::LongType dimensionLength, int postProcessOrNot, sd::LongType* allocationBuffer, void* reductionBuffer,
sd::LongType const* tadOnlyShapeInfo,
sd::LongType const* tadOffsets) {
DISPATCH_BY_OPNUM_TT(transform,
PARAMS(dx, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, postProcessOrNot,
allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets),
SUMMARY_STATS_OPS);
};
template <typename X, typename Z>
SD_HOST void SummaryStatsReduce<X, Z>::execSummaryStatsReduceScalar(
dim3& launchDims, hipStream_t* stream, int opNum, void const* vx, sd::LongType const* xShapeInfo,
sd::LongType const* hxShapeInfo, void* vextraParams, void* vz, sd::LongType const* zShapeInfo,
sd::LongType const* hzShapeInfo, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets,
bool biasCorrected, void* reductionBuffer) {
auto x = static_cast<X const*>(vx);
auto extraParams = static_cast<Z*>(vextraParams);
auto z = reinterpret_cast<Z*>(vz);
auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer);
if (sd::Environment::getInstance().isDebugAndVerbose()) printf("D16 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( summaryStatsReduceT<X, Z>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), nullptr, 1,
1, biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets);
// this is blocking method since method should return scalar
sd::DebugHelper::checkErrorCode(stream, "execSSReduceScalar(...) failed");
}
template <typename X, typename Z>
SD_HOST void SummaryStatsReduce<X, Z>::execSummaryStatsReduce(
dim3& launchDims, hipStream_t* stream, int opNum, void const* vx, sd::LongType const* xShapeInfo,
sd::LongType const* hxShapeInfo, void* vextraParams, void* vz, sd::LongType const* zShapeInfo,
sd::LongType const* hzShapeInfo, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets,
bool biasCorrected, void* reductionBuffer) {
auto x = static_cast<X const*>(vx);
auto z = static_cast<Z*>(vz);
auto extraParams = static_cast<Z*>(vextraParams);
if (sd::Environment::getInstance().isDebugAndVerbose()) printf("F17 opNum:[%i]\n", opNum);
auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer);
hipLaunchKernelGGL(( summaryStatsReduceT<X, Z>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), nullptr, 1,
1, biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets);
DEBUG_KERNEL(stream, opNum);
}
template <typename X, typename Z>
SD_HOST void SummaryStatsReduce<X, Z>::execSummaryStatsReduce(
dim3& launchDims, hipStream_t* stream, int opNum, void const* vx, sd::LongType const* xShapeInfo,
sd::LongType const* hxShapeInfo, void* vextraParams, void* vz, sd::LongType const* zShapeInfo,
sd::LongType const* hzShapeInfo, sd::LongType* dimension, long long int dimensionLength, sd::LongType const* tadShapeInfo,
sd::LongType const* tadOffsets, bool biasCorrected, void* reductionBuffer) {
auto x = static_cast<X const*>(vx);
auto z = static_cast<Z*>(vz);
auto extraParams = static_cast<Z*>(vextraParams);
if (sd::Environment::getInstance().isDebugAndVerbose()) printf("D18 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( summaryStatsReduceT<X, Z>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), dimension,
dimensionLength, 1, biasCorrected, nullptr, reinterpret_cast<Z*>(reductionBuffer), tadShapeInfo, tadOffsets);
DEBUG_KERNEL(stream, opNum);
}
BUILD_DOUBLE_TEMPLATE(template class SummaryStatsReduce, , SD_COMMON_TYPES, SD_FLOAT_TYPES);
} // namespace summarystats
} // namespace functions
| e70847914326740a6236a865aad95a7f62545232.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <cuda.h>
#include <cuda_runtime.h>
#include <helpers/DebugHelper.h>
#include <helpers/TAD.h>
#include <helpers/shape.h>
#include <loops/summarystatsreduce.h>
#include <ops/specials_cuda.h>
#include <system/Environment.h>
#include <system/op_boilerplate.h>
#include <types/float16.h>
#include <types/types.h>
using namespace simdOps;
namespace functions {
namespace summarystats {
template <typename X, typename Z>
void SD_KERNEL summaryStatsReduceT(int op, void const* dx, sd::LongType const* xShapeInfo, sd::LongType xRank, void* extraParams,
void* z, sd::LongType const* zShapeInfo, sd::LongType zRank,
sd::LongType* dimension, long long int dimensionLength, int postProcessOrNot, bool biasCorrected, sd::LongType* allocationBuffer,
void* reductionBuffer, sd::LongType const* tadOnlyShapeInfo,
sd::LongType const* tadOffsets) {
functions::summarystats::SummaryStatsReduce<X, Z>::transform(
op, dx, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, biasCorrected, allocationBuffer,
reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
/**
*
* @param sPartialsRef
* @param tid
* @param extraParams
*/
template <typename X, typename Z>
template <typename OpType>
SD_DEVICE void SummaryStatsReduce<X, Z>::aggregatePartials(SummaryStatsData<X>* sPartials, sd::LongType tid,
sd::LongType numElements, void* vextraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
auto extraParams = static_cast<Z*>(vextraParams);
sd::LongType floorPow2 = blockDim.x;
if (floorPow2 & (floorPow2 - 1)) {
while (floorPow2 & (floorPow2 - 1)) {
floorPow2 &= floorPow2 - 1;
}
if (tid >= floorPow2) {
SummaryStatsData<X> prev = sPartials[tid - floorPow2];
SummaryStatsData<X> curr = sPartials[tid];
sPartials[tid - floorPow2] = update(prev, curr, extraParams);
}
__syncthreads();
}
for (sd::LongType activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numElements) {
SummaryStatsData<X> curr = sPartials[tid];
SummaryStatsData<X> next = sPartials[tid + activeThreads];
sPartials[tid] = update(curr, next, extraParams);
}
__syncthreads();
}
};
/**
* @param n n is the number of
* elements to loop through
* @param dx the data to operate on
* @param xVectorInfo the meta data for the vector:
* 0 is the offset
* 1 is the increment/stride
* 2 is the real length of the buffer (n and dx.length won't always be the same)
* 3 is the element wise stride for the buffer
* 4 is the number of elements it takes to get to the next row/column/tensor
* @param gpuInformation
* 0 is the block size
* 1 is the grid size
* 2 is the shared memory size
* @param problemDefinition
* 0 is the number of elements per vector
* 1 is the number of vectors
*/
template <typename X, typename Z>
template <typename OpType>
SD_DEVICE void SummaryStatsReduce<X, Z>::transform(void const* vx, sd::LongType const* xShapeInfo, void* vextraParams,
void* vz, sd::LongType const* zShapeInfo, sd::LongType* dimension,
sd::LongType dimensionLength, int postProcessOrNot,
sd::LongType* allocationBuffer,
void* vreductionBuffer, sd::LongType const* tadOnlyShapeInfo,
sd::LongType const* tadOffsets) {
auto dx = static_cast<X const*>(vx);
auto z = static_cast<Z*>(vz);
auto extraParams = static_cast<Z*>(vextraParams);
auto reductionBuffer = static_cast<Z*>(vreductionBuffer);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ volatile int resultScalar;
__shared__ int xElementWiseStride;
int numElements = blockDim.x;
// shared memory space for storing intermediate results
__shared__ SummaryStatsData<X> sPartials[SD_CUDA_BLOCK_SIZE];
Z startingVal = startingValue(dx);
SummaryStatsData<X> val;
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
// length for the tad
__shared__ volatile int xLength;
__shared__ volatile int resultLength;
SummaryStatsData<X> reduction;
reduction.initWithValue(0.0);
reduction.n = 0;
if (threadIdx.x == 0) {
if (zShapeInfo != nullptr)
resultLength = shape::length(zShapeInfo);
else
resultLength = 1;
if (dimensionLength == 1) {
if (resultLength == 1 && (dimension == nullptr || dimension[0] == SD_MAX_DIMENSION))
resultScalar = 1;
else
resultScalar = 0;
} else
resultScalar = 0;
if (resultLength == 1) resultScalar = 1;
auto xStride = shape::stride(xShapeInfo);
auto xOrder = shape::order(xShapeInfo);
if (dimension != nullptr && (dimension[0] != SD_MAX_DIMENSION && dimensionLength == 1)) {
xElementWiseStride = xStride[dimension[0]];
} else {
xElementWiseStride = shape::elementWiseStride(xShapeInfo);
}
xLength = shape::length(xShapeInfo);
}
__syncthreads();
if (!resultScalar) {
__shared__ int tadLength;
__shared__ int tadEWS;
__shared__ int numTads;
if (threadIdx.x == 0) {
tadLength = shape::length(tadOnlyShapeInfo);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
}
__syncthreads();
if (tadEWS == 0) {
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
auto tadOffsetForBlock = tadOffsets[r];
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {
auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo);
SummaryStatsData<X> indexVal2;
indexVal2.initWithValue(dx[xOffset]);
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(blockDim.x, tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[r] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]);
}
__syncthreads();
}
} else {
for (int i = blockIdx.x; i < numTads; i += gridDim.x) {
auto tadOffsetForBlock = tadOffsets[i];
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
for (int x = threadIdx.x; x < tadLength; x += blockDim.x) {
auto indexX = tadOffsetForBlock + x * tadEWS;
SummaryStatsData<X> indexVal2;
indexVal2.initWithValue(dx[indexX]);
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(blockDim.x, tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[i] = OpType::getValue(postProcessOrNot,
sPartials[threadIdx.x]); // postProcess(sPartials[0],tadLength ,extraParams);
}
}
}
} else if (resultScalar) {
__shared__ int n;
if (threadIdx.x == 0) {
xElementWiseStride = shape::elementWiseStride(xShapeInfo);
n = shape::length(xShapeInfo);
}
__syncthreads();
if (xElementWiseStride >= 1) {
for (sd::LongType i = tid; i < n; i += (blockDim.x * gridDim.x)) {
SummaryStatsData<X> indexVal2;
indexVal2.initWithValue(dx[i * xElementWiseStride]);
reduction = update(reduction, indexVal2, extraParams);
}
} else {
for (sd::LongType i = tid; i < n; i += blockDim.x * gridDim.x) {
auto offset = shape::getIndexOffset(i, xShapeInfo);
SummaryStatsData<X> indexVal2;
indexVal2.initWithValue(dx[offset]);
reduction = update(reduction, indexVal2, extraParams);
}
}
sPartials[threadIdx.x] = reduction;
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, blockDim.x, extraParams);
__syncthreads();
if (gridDim.x > 1) {
__shared__ bool amLast;
unsigned int* tc = (unsigned int*)reductionBuffer;
tid = threadIdx.x;
if (threadIdx.x == 0) {
SummaryStatsData<X>* pBuffer = (SummaryStatsData<X>*)reductionBuffer;
pBuffer[blockIdx.x] = sPartials[0];
}
__threadfence();
__syncthreads();
if (tid == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
SummaryStatsData<X>* pBuffer = (SummaryStatsData<X>*)reductionBuffer;
Z startingVal = startingValue(dx);
SummaryStatsData<X> val;
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], pBuffer[i], extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, gridDim.x, extraParams);
__syncthreads();
if (tid == 0) {
z[0] = OpType::getValue(postProcessOrNot, sPartials[0]);
}
}
} else {
if (tid == 0) {
unsigned int* tc = (unsigned*)reductionBuffer;
tc[16384] = 0;
        z[0] = OpType::getValue(postProcessOrNot, sPartials[0]);
}
}
}
};
template <typename X, typename Y>
SD_DEVICE void SummaryStatsReduce<X, Y>::transform(const int opNum, void const* dx, sd::LongType const* xShapeInfo,
void* extraParams, void* z, sd::LongType const* zShapeInfo,
sd::LongType* dimension, sd::LongType dimensionLength, int postProcessOrNot, sd::LongType* allocationBuffer, void* reductionBuffer,
sd::LongType const* tadOnlyShapeInfo,
sd::LongType const* tadOffsets) {
DISPATCH_BY_OPNUM_TT(transform,
PARAMS(dx, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, postProcessOrNot,
allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets),
SUMMARY_STATS_OPS);
};
template <typename X, typename Z>
SD_HOST void SummaryStatsReduce<X, Z>::execSummaryStatsReduceScalar(
dim3& launchDims, cudaStream_t* stream, int opNum, void const* vx, sd::LongType const* xShapeInfo,
sd::LongType const* hxShapeInfo, void* vextraParams, void* vz, sd::LongType const* zShapeInfo,
sd::LongType const* hzShapeInfo, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets,
bool biasCorrected, void* reductionBuffer) {
auto x = static_cast<X const*>(vx);
auto extraParams = static_cast<Z*>(vextraParams);
auto z = reinterpret_cast<Z*>(vz);
auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer);
if (sd::Environment::getInstance().isDebugAndVerbose()) printf("D16 opNum:[%i]\n", opNum);
summaryStatsReduceT<X, Z><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), nullptr, 1,
1, biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets);
// this is blocking method since method should return scalar
sd::DebugHelper::checkErrorCode(stream, "execSSReduceScalar(...) failed");
}
template <typename X, typename Z>
SD_HOST void SummaryStatsReduce<X, Z>::execSummaryStatsReduce(
dim3& launchDims, cudaStream_t* stream, int opNum, void const* vx, sd::LongType const* xShapeInfo,
sd::LongType const* hxShapeInfo, void* vextraParams, void* vz, sd::LongType const* zShapeInfo,
sd::LongType const* hzShapeInfo, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets,
bool biasCorrected, void* reductionBuffer) {
auto x = static_cast<X const*>(vx);
auto z = static_cast<Z*>(vz);
auto extraParams = static_cast<Z*>(vextraParams);
if (sd::Environment::getInstance().isDebugAndVerbose()) printf("F17 opNum:[%i]\n", opNum);
auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer);
summaryStatsReduceT<X, Z><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), nullptr, 1,
1, biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets);
DEBUG_KERNEL(stream, opNum);
}
template <typename X, typename Z>
SD_HOST void SummaryStatsReduce<X, Z>::execSummaryStatsReduce(
dim3& launchDims, cudaStream_t* stream, int opNum, void const* vx, sd::LongType const* xShapeInfo,
sd::LongType const* hxShapeInfo, void* vextraParams, void* vz, sd::LongType const* zShapeInfo,
sd::LongType const* hzShapeInfo, sd::LongType* dimension, long long int dimensionLength, sd::LongType const* tadShapeInfo,
sd::LongType const* tadOffsets, bool biasCorrected, void* reductionBuffer) {
auto x = static_cast<X const*>(vx);
auto z = static_cast<Z*>(vz);
auto extraParams = static_cast<Z*>(vextraParams);
if (sd::Environment::getInstance().isDebugAndVerbose()) printf("D18 opNum:[%i]\n", opNum);
summaryStatsReduceT<X, Z><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), dimension,
dimensionLength, 1, biasCorrected, nullptr, reinterpret_cast<Z*>(reductionBuffer), tadShapeInfo, tadOffsets);
DEBUG_KERNEL(stream, opNum);
}
BUILD_DOUBLE_TEMPLATE(template class SummaryStatsReduce, , SD_COMMON_TYPES, SD_FLOAT_TYPES);
} // namespace summarystats
} // namespace functions
|
441330e2785016b185dddf41d5a0bfa8cbfb1a47.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
#include "basics/tensor.cu"
#include "basics/session.hpp"
#include "basics/network.cu"
#include "layers/data.cu"
#include "layers/softmax.cu"
#include "layers/cross_entropy_loss.cu"
#include "layers/pooling.cu"
#include "layers/conv2d.cu"
#include "layers/relu.cu"
#include "layers/fc.cu"
#include "utils/bitmap_image.hpp"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "utils/helper_cuda.h"
#include "utils/utils.cu"
#include "utils/load_model.hpp"
__global__ void show_tensor(Tensor<double> * tensor) {
size_t d1 = tensor->GetDims()[0];
size_t d2 = tensor->GetDims()[1];
size_t d3 = tensor->GetDims()[2];
size_t d4 = tensor->GetDims()[3];
for(int k = 0; k < d3; k++) {
for(int l = 0; l < d4; l++) {
for(int j = 0; j < d2; j++) {
for(int i = 0; i < d1; i++) {
printf("%e ", tensor->at(i, j, k, l));
}
printf("\n");
}
printf("\n");
}
printf("\n");
}
}
void print_acc(int iter, int batch_size, Tensor<double>* sm_top_gpu, Tensor<double> * labels_gpu) {
Tensor<double>* sm_top_cpu = Tensor<double>::TensorGPUtoCPU(sm_top_gpu);
Tensor<double>* label_cpu = Tensor<double>::TensorGPUtoCPU(labels_gpu);
  // sm_top_cpu: class probabilities of shape [batch, 1, 1, 10]
  // label_cpu:  ground-truth labels of shape [batch, 1, 1, 1]
double cnt = 0;
for(int i = 0; i < batch_size; i++) {
double max_val = sm_top_cpu->at(i, 0, 0, 0);
int label = 0;
for(int j = 0; j < 10; j++) {
// printf("%f ", sm_top_cpu->at(i, 0, 0, j));
if(sm_top_cpu->at(i, 0, 0, j) > max_val) {
max_val = sm_top_cpu->at(i, 0, 0, j);
label = j;
}
}
// printf("predicted label: %d, ground truth label: %d \n", label, (int)label_cpu->at(i, 0, 0, 0));
if(label == (int)label_cpu->at(i, 0, 0, 0)) {
cnt += 1;
}
}
double acc = cnt / (double) batch_size;
printf("iteration %d accuracy: %d/%d %f \n", iter, (int)cnt, batch_size, acc);
delete sm_top_cpu;
delete label_cpu;
}
void demo_bp_cifar10_gpu() {
printf("Start training convolutional networks on cifar10\n\n");
hipError_t cudaStatus = hipSetDevice(0);
checkCudaErrors(cudaStatus);
startTimer();
Session* session = Session::GetNewSession();
session->gpu = true;
session->batch_size = 1;
session->lr = 0.0002;
size_t batch_size = session->batch_size;
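  // Layers built below: three (5x5 conv -> 2x2 max pool -> ReLU) blocks taking the
  // channel count 3 -> 32 -> 32 -> 64, then FC 4*4*64 -> 64, FC 64 -> 10, softmax and
  // cross-entropy loss; all weights start from Gaussian initializers.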
Data<double>* data_layer = new Data<double>(batch_size, "datasets/cifar10/train.txt");
Conv2D<double>* conv1 = new Conv2D<double>
(5,5,3,32,1, new GaussianKernelInitializer<double>(0.0001), SAME);
Pooling<double>* pool1 = new Pooling<double>(2, MAX, 2);
Relu<double>* relu1 = new Relu<double>;
Conv2D<double>* conv2 = new Conv2D<double>
(5,5,32,32,1, new GaussianKernelInitializer<double>(0.01), SAME);
Pooling<double>* pool2 = new Pooling<double>(2, MAX, 2);
Relu<double>* relu2 = new Relu<double>;
Conv2D<double>* conv3 = new Conv2D<double>
(5,5,32,64,1, new GaussianKernelInitializer<double>(0.01), SAME);
Pooling<double>* pool3 = new Pooling<double>(2, MAX, 2);
Relu<double>* relu3 = new Relu<double>;
FC<double>* fc4 = new FC<double>
(4*4*64,64, new GaussianKernelInitializer<double>(0.1));
FC<double>* fc5 = new FC<double>
(64, 10, new GaussianKernelInitializer<double>(0.1));
Softmax<double>* softmax = new Softmax<double>;
CrossEntropyLoss<double>* cel_layer = new CrossEntropyLoss<double>;
Network<double> network({data_layer, conv1, pool1, relu1,
conv2, pool2, relu2,
conv3, pool3, relu3,
fc4, fc5, softmax, cel_layer});
printf("network finished setup: %3.1f ms \n", stopTimer());
show_mem(cudaStatus);
cudaStatus = hipGetLastError();
checkCudaErrors(cudaStatus);
for(int iter = 0; iter < 10000; iter++) {
startTimer();
network.Forward();
network.Backward();
printf("iteration: %d\n", iter);
Tensor<double>* cel_top = network.GetLayerData(13)->tops[0];
if (iter % 1 == 0) {
hipLaunchKernelGGL(( show_tensor), dim3(1),dim3(1), 0, 0, cel_top);
}
Tensor<double>* sm_top = network.GetLayerData(12)->tops[0];
Tensor<double>* labels = network.GetLayerData(0)->tops[1];
print_acc(iter, batch_size, sm_top, labels);
printf("iteration time: %3.1f ms \n", stopTimer());
}
printf("Prediction: \n");
Tensor<double>* sm_top = network.GetLayerData(12)->tops[0];
Tensor<double>* out = Tensor<double>::TensorGPUtoCPU(sm_top);
for (int b = 0; b < out->GetDims()[0]; b++) {
for (int h = 0; h < out->GetDims()[1]; h++) {
for (int w = 0; w < out->GetDims()[2]; w++) {
for (int c = 0; c < out->GetDims()[3]; c++) {
if (c == 0) { printf("Airplane "); }
else if (c == 1) { printf("Automobile "); }
else if (c == 2) { printf("Bird "); }
else if (c == 3) { printf("Cat "); }
else if (c == 4) { printf("Deer "); }
else if (c == 5) { printf("Dog "); }
else if (c == 6) { printf("Frog "); }
else if (c == 7) { printf("Horse "); }
else if (c == 8) { printf("Ship "); }
else if (c == 9) { printf("truck "); }
printf("probability: %1.4f \n", out->at(b,h,w,c));
}
}
}
}
}
int main() {
demo_bp_cifar10_gpu();
}
| 441330e2785016b185dddf41d5a0bfa8cbfb1a47.cu | #include <stdio.h>
#include <assert.h>
#include "basics/tensor.cu"
#include "basics/session.hpp"
#include "basics/network.cu"
#include "layers/data.cu"
#include "layers/softmax.cu"
#include "layers/cross_entropy_loss.cu"
#include "layers/pooling.cu"
#include "layers/conv2d.cu"
#include "layers/relu.cu"
#include "layers/fc.cu"
#include "utils/bitmap_image.hpp"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "utils/helper_cuda.h"
#include "utils/utils.cu"
#include "utils/load_model.hpp"
__global__ void show_tensor(Tensor<double> * tensor) {
size_t d1 = tensor->GetDims()[0];
size_t d2 = tensor->GetDims()[1];
size_t d3 = tensor->GetDims()[2];
size_t d4 = tensor->GetDims()[3];
for(int k = 0; k < d3; k++) {
for(int l = 0; l < d4; l++) {
for(int j = 0; j < d2; j++) {
for(int i = 0; i < d1; i++) {
printf("%e ", tensor->at(i, j, k, l));
}
printf("\n");
}
printf("\n");
}
printf("\n");
}
}
void print_acc(int iter, int batch_size, Tensor<double>* sm_top_gpu, Tensor<double> * labels_gpu) {
Tensor<double>* sm_top_cpu = Tensor<double>::TensorGPUtoCPU(sm_top_gpu);
Tensor<double>* label_cpu = Tensor<double>::TensorGPUtoCPU(labels_gpu);
  // sm_top_cpu: class probabilities of shape [batch, 1, 1, 10]
  // label_cpu:  ground-truth labels of shape [batch, 1, 1, 1]
double cnt = 0;
for(int i = 0; i < batch_size; i++) {
double max_val = sm_top_cpu->at(i, 0, 0, 0);
int label = 0;
for(int j = 0; j < 10; j++) {
// printf("%f ", sm_top_cpu->at(i, 0, 0, j));
if(sm_top_cpu->at(i, 0, 0, j) > max_val) {
max_val = sm_top_cpu->at(i, 0, 0, j);
label = j;
}
}
// printf("predicted label: %d, ground truth label: %d \n", label, (int)label_cpu->at(i, 0, 0, 0));
if(label == (int)label_cpu->at(i, 0, 0, 0)) {
cnt += 1;
}
}
double acc = cnt / (double) batch_size;
printf("iteration %d accuracy: %d/%d %f \n", iter, (int)cnt, batch_size, acc);
delete sm_top_cpu;
delete label_cpu;
}
void demo_bp_cifar10_gpu() {
printf("Start training convolutional networks on cifar10\n\n");
cudaError_t cudaStatus = cudaSetDevice(0);
checkCudaErrors(cudaStatus);
startTimer();
Session* session = Session::GetNewSession();
session->gpu = true;
session->batch_size = 1;
session->lr = 0.0002;
size_t batch_size = session->batch_size;
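  // Layers built below: three (5x5 conv -> 2x2 max pool -> ReLU) blocks taking the
  // channel count 3 -> 32 -> 32 -> 64, then FC 4*4*64 -> 64, FC 64 -> 10, softmax and
  // cross-entropy loss; all weights start from Gaussian initializers.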
Data<double>* data_layer = new Data<double>(batch_size, "datasets/cifar10/train.txt");
Conv2D<double>* conv1 = new Conv2D<double>
(5,5,3,32,1, new GaussianKernelInitializer<double>(0.0001), SAME);
Pooling<double>* pool1 = new Pooling<double>(2, MAX, 2);
Relu<double>* relu1 = new Relu<double>;
Conv2D<double>* conv2 = new Conv2D<double>
(5,5,32,32,1, new GaussianKernelInitializer<double>(0.01), SAME);
Pooling<double>* pool2 = new Pooling<double>(2, MAX, 2);
Relu<double>* relu2 = new Relu<double>;
Conv2D<double>* conv3 = new Conv2D<double>
(5,5,32,64,1, new GaussianKernelInitializer<double>(0.01), SAME);
Pooling<double>* pool3 = new Pooling<double>(2, MAX, 2);
Relu<double>* relu3 = new Relu<double>;
FC<double>* fc4 = new FC<double>
(4*4*64,64, new GaussianKernelInitializer<double>(0.1));
FC<double>* fc5 = new FC<double>
(64, 10, new GaussianKernelInitializer<double>(0.1));
Softmax<double>* softmax = new Softmax<double>;
CrossEntropyLoss<double>* cel_layer = new CrossEntropyLoss<double>;
Network<double> network({data_layer, conv1, pool1, relu1,
conv2, pool2, relu2,
conv3, pool3, relu3,
fc4, fc5, softmax, cel_layer});
printf("network finished setup: %3.1f ms \n", stopTimer());
show_mem(cudaStatus);
cudaStatus = cudaGetLastError();
checkCudaErrors(cudaStatus);
for(int iter = 0; iter < 10000; iter++) {
startTimer();
network.Forward();
network.Backward();
printf("iteration: %d\n", iter);
Tensor<double>* cel_top = network.GetLayerData(13)->tops[0];
if (iter % 1 == 0) {
show_tensor<<<1,1>>>(cel_top);
}
Tensor<double>* sm_top = network.GetLayerData(12)->tops[0];
Tensor<double>* labels = network.GetLayerData(0)->tops[1];
print_acc(iter, batch_size, sm_top, labels);
printf("iteration time: %3.1f ms \n", stopTimer());
}
printf("Prediction: \n");
Tensor<double>* sm_top = network.GetLayerData(12)->tops[0];
Tensor<double>* out = Tensor<double>::TensorGPUtoCPU(sm_top);
for (int b = 0; b < out->GetDims()[0]; b++) {
for (int h = 0; h < out->GetDims()[1]; h++) {
for (int w = 0; w < out->GetDims()[2]; w++) {
for (int c = 0; c < out->GetDims()[3]; c++) {
if (c == 0) { printf("Airplane "); }
else if (c == 1) { printf("Automobile "); }
else if (c == 2) { printf("Bird "); }
else if (c == 3) { printf("Cat "); }
else if (c == 4) { printf("Deer "); }
else if (c == 5) { printf("Dog "); }
else if (c == 6) { printf("Frog "); }
else if (c == 7) { printf("Horse "); }
else if (c == 8) { printf("Ship "); }
else if (c == 9) { printf("truck "); }
printf("probability: %1.4f \n", out->at(b,h,w,c));
}
}
}
}
}
int main() {
demo_bp_cifar10_gpu();
}
|
be5ee13076b06c3612238fc9730810d6e5a971cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "include.cuh"
#include "ANCFSystem.cuh"
#include "Element.cuh"
#include "Node.cuh"
#include "Particle_hip.cuh"
bool updateDraw = 1;
bool showSphere = 1;
// Create the system (placed outside of main so it is available to the OpenGL code)
int workingThread = 0;
const int numSystems = 2;
ANCFSystem* sys[numSystems];
#ifdef WITH_GLUT
OpenGLCamera oglcamera(camreal3(-1,1,-1),camreal3(0,0,0),camreal3(0,1,0),.01);
// OPENGL RENDERING CODE //
void changeSize(int w, int h) {
if(h == 0) {h = 1;}
float ratio = 1.0* w / h;
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glViewport(0, 0, w, h);
gluPerspective(45,ratio,.1,1000);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
gluLookAt(0.0,0.0,0.0, 0.0,0.0,-7, 0.0f,1.0f,0.0f);
}
void initScene(){
GLfloat light_position[] = { 1.0, 1.0, 1.0, 0.0 };
glClearColor (1.0, 1.0, 1.0, 0.0);
glShadeModel (GL_SMOOTH);
glEnable(GL_COLOR_MATERIAL);
glLightfv(GL_LIGHT0, GL_POSITION, light_position);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable (GL_POINT_SMOOTH);
glEnable (GL_BLEND);
glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glHint (GL_POINT_SMOOTH_HINT, GL_DONT_CARE);
}
void drawAll()
{
if(updateDraw){
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST);
glFrontFace(GL_CCW);
glCullFace(GL_BACK);
glEnable(GL_CULL_FACE);
glDepthFunc(GL_LEQUAL);
glClearDepth(1.0);
glPointSize(2);
glLoadIdentity();
oglcamera.Update();
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) {
glColor3f(0.0f,0.0f,1.0f);
if(sysIndex==1) glColor3f(1.0f,0.0f,0.0f);
for(int i=0;i<sys[sysIndex]->elements.size();i++)
{
int xiDiv = sys[sysIndex]->numContactPoints;
double xiInc = 1/(static_cast<double>(xiDiv-1));
if(showSphere)
{
//glColor3f(0.0f,0.0f,1.0f);
for(int j=0;j<xiDiv;j++)
{
glPushMatrix();
float3 position = sys[sysIndex]->getXYZPosition(i,xiInc*j);
glTranslatef(position.x,position.y,position.z);
glutSolidSphere(sys[sysIndex]->elements[i].getRadius(),10,10);
glPopMatrix();
}
}
else
{
int xiDiv = sys[sysIndex]->numContactPoints;
double xiInc = 1/(static_cast<double>(xiDiv-1));
glLineWidth(sys[sysIndex]->elements[i].getRadius()*500);
//glColor3f(0.0f,1.0f,0.0f);
glBegin(GL_LINE_STRIP);
for(int j=0;j<sys[sysIndex]->numContactPoints;j++)
{
float3 position = sys[sysIndex]->getXYZPosition(i,xiInc*j);
glVertex3f(position.x,position.y,position.z);
}
glEnd();
glFlush();
}
}
}
glutSwapBuffers();
}
}
void renderSceneAll(){
if(OGL){
//if(sys->timeIndex%10==0)
drawAll();
// Figure out the non-working thread (based on working thread)
int nonWorkingThread = 1;
if(workingThread) nonWorkingThread = 0;
// The working thread will perform the time step while the non-working thread updates the preconditioner
cout << "SYSTEM " << workingThread << " UPDATE TIME STEP" << "(TIME: " << sys[workingThread]->time << ")" << endl;
sys[workingThread]->DoTimeStep();
cout << " SYSTEM " << nonWorkingThread << " UPDATE PRECONDITIONER... " << endl;
if(!sys[nonWorkingThread]->precUpdated) sys[nonWorkingThread]->updatePreconditioner();
cout << " PRECONDITIONER UPDATE COMPLETE (" << sys[nonWorkingThread]->precUpdated << ")" << endl;
// When the preconditioner is ready, switch the jobs of the systems
if(sys[workingThread]->timeIndex%1 == 0) {
cout << "SWITCH SYSTEM " << workingThread << " -> " << abs(1-workingThread) << "... ";
sys[workingThread]->transferState(sys[nonWorkingThread]);
cout << "SWITCH COMPLETE." << endl;
workingThread = nonWorkingThread;
}
}
}
void CallBackKeyboardFunc(unsigned char key, int x, int y) {
switch (key) {
case 'w':
oglcamera.Forward();
break;
case 's':
oglcamera.Back();
break;
case 'd':
oglcamera.Right();
break;
case 'a':
oglcamera.Left();
break;
case 'q':
oglcamera.Up();
break;
case 'e':
oglcamera.Down();
break;
}
}
void CallBackMouseFunc(int button, int state, int x, int y) {
oglcamera.SetPos(button, state, x, y);
}
void CallBackMotionFunc(int x, int y) {
oglcamera.Move2D(x, y);
}
#endif
// END OPENGL RENDERING CODE //
int main(int argc, char** argv)
{
/////////////////////////////////////////////////////////////////////////
//
// Set up the system
//
////////////////////////////////////////////////////////////////////////
// command line arguments
// ImplicitBeamsGPU <numPartitions> <numBeamsPerSide> <solverType> <usePreconditioning> <elasticModulus> <dataFolder>
// solverType: (0) BiCGStab, (1) BiCGStab1, (2) BiCGStab2, (3) MinRes
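  // Example invocation (illustrative values only, not taken from project documentation):
  //   ./ImplicitBeamsGPU 10 20 2 1 2e7 ./output
  //   -> 10 partitions, 20 beams per side, BiCGStab2 solver, preconditioning enabled,
  //      elastic modulus 2e7, data files written under ./output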
#ifdef WITH_GLUT
  bool visualize = false;  // set to true to enable the OpenGL visualization loop below
#endif
double hh = 1e-3;
int numElementsPerSide = 4;
double E = 2e7;
double t_end = 5.0;
int precUpdateInterval = -1;
float precMaxKrylov = -1;
int outputInterval = 10;
double length = 1;
double r = .02;
double rho = 2200;
double nu = .3;
string data_folder = "./garbage";
// Set up variables for multi-GPU
omp_set_num_threads(numSystems);
workingThread = 0;
omp_lock_t g_lock;
omp_init_lock(&g_lock);
// Check the number of devices
int deviceCount;
hipGetDeviceCount(&deviceCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", deviceCount);
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) {
if(deviceCount>1) {
sys[sysIndex] = new ANCFSystem(sysIndex);
} else {
sys[sysIndex] = new ANCFSystem(); // Create both systems on the same device
}
sys[sysIndex]->setTimeStep(hh, 1e-10);
sys[sysIndex]->setMaxNewtonIterations(20);
sys[sysIndex]->setMaxKrylovIterations(5000);
sys[sysIndex]->numContactPoints = 30;
sys[sysIndex]->preconditionerUpdateModulus = 0; // Never perform an automatic update
sys[sysIndex]->setNumPartitions(1);
sys[sysIndex]->setSolverType(2);
sys[sysIndex]->setPrecondType(0);
sys[sysIndex]->fullJacobian = 1;
if(argc>1) {
sys[sysIndex]->setNumPartitions((int)atoi(argv[1]));
numElementsPerSide = atoi(argv[2]);
sys[sysIndex]->setSolverType((int)atoi(argv[3]));
sys[sysIndex]->setPrecondType(atoi(argv[4]));
if(atoi(argv[4])) {
sys[sysIndex]->preconditionerUpdateModulus = 0; // Never perform an automatic update
sys[sysIndex]->preconditionerMaxKrylovIterations = precMaxKrylov;
}
E = atof(argv[5]);
data_folder = argv[6];
}
}
/////////////////////////////////////////////////////////////////////////
//
// Add elements to system
//
////////////////////////////////////////////////////////////////////////
Element element;
int k = 0;
// Add elements in x-direction
for (int j = 0; j < numElementsPerSide+1; j++) {
for (int i = 0; i < numElementsPerSide; i++) {
element = Element(Node(i*length, 0, j*length, 1, 0, 0),
Node((i+1)*length, 0, j*length, 1, 0, 0),
r, nu, E, rho);
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) sys[sysIndex]->addElement(&element);
k++;
if(k%100==0) printf("Elements %d\n",k);
}
}
// Add elements in z-direction
for (int j = 0; j < numElementsPerSide+1; j++) {
for (int i = 0; i < numElementsPerSide; i++) {
element = Element(Node(j*length, 0, i*length, 0, 0, 1),
Node(j*length, 0, (i+1)*length, 0, 0, 1),
r, nu, E, rho);
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) sys[sysIndex]->addElement(&element);
k++;
if(k%100==0) printf("Elements %d\n",k);
}
}
/////////////////////////////////////////////////////////////////////////
//
// Add constraints to system
//
////////////////////////////////////////////////////////////////////////
// Fix corners to ground
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) {
sys[sysIndex]->addConstraint_AbsoluteSpherical(sys[sysIndex]->elements[0], 0);
sys[sysIndex]->addConstraint_AbsoluteSpherical(sys[sysIndex]->elements[2*numElementsPerSide*(numElementsPerSide+1)-numElementsPerSide], 0);
sys[sysIndex]->addConstraint_AbsoluteSpherical(sys[sysIndex]->elements[numElementsPerSide*(numElementsPerSide+1)-numElementsPerSide], 0);
sys[sysIndex]->addConstraint_AbsoluteSpherical(sys[sysIndex]->elements[2*numElementsPerSide*(numElementsPerSide+1)-1], 1);
sys[sysIndex]->addConstraint_AbsoluteSpherical(sys[sysIndex]->elements[numElementsPerSide*(numElementsPerSide+1)-1], 1);
}
// Constrain x-strands together
for(int j=0; j < numElementsPerSide+1; j++)
{
for(int i=0; i < numElementsPerSide-1; i++)
{
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) sys[sysIndex]->addConstraint_RelativeFixed(
sys[sysIndex]->elements[i+j*numElementsPerSide], 1,
sys[sysIndex]->elements[i+1+j*numElementsPerSide], 0);
}
}
// Constrain z-strands together
int offset = numElementsPerSide*(numElementsPerSide+1);
for(int j=0; j < numElementsPerSide+1; j++)
{
for(int i=0; i < numElementsPerSide-1; i++)
{
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) sys[sysIndex]->addConstraint_RelativeFixed(
sys[sysIndex]->elements[i+offset+j*numElementsPerSide], 1,
sys[sysIndex]->elements[i+offset+1+j*numElementsPerSide], 0);
}
}
// Constrain cross-streams together
for(int j=0; j < numElementsPerSide; j++)
{
for(int i=0; i < numElementsPerSide; i++)
{
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) sys[sysIndex]->addConstraint_RelativeSpherical(
sys[sysIndex]->elements[i*numElementsPerSide+j], 0,
sys[sysIndex]->elements[offset+i+j*numElementsPerSide], 0);
}
}
for(int i=0; i < numElementsPerSide; i++)
{
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) sys[sysIndex]->addConstraint_RelativeSpherical(
sys[sysIndex]->elements[numElementsPerSide-1+numElementsPerSide*i], 1,
sys[sysIndex]->elements[2*offset-numElementsPerSide+i], 0);
}
for(int i=0; i < numElementsPerSide; i++)
{
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) sys[sysIndex]->addConstraint_RelativeSpherical(
sys[sysIndex]->elements[numElementsPerSide*(numElementsPerSide+1)+numElementsPerSide-1+numElementsPerSide*i], 1,
sys[sysIndex]->elements[numElementsPerSide*numElementsPerSide+i], 0);
}
/////////////////////////////////////////////////////////////////////////
//
// Finalize the system
//
////////////////////////////////////////////////////////////////////////
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) {
printf("%d, %d, %d\n",sys[sysIndex]->elements.size(),sys[sysIndex]->constraints.size(),12*sys[sysIndex]->elements.size()+sys[sysIndex]->constraints.size());
sys[sysIndex]->initializeSystem();
printf("System[%d] initialized!\n",sysIndex);
sys[sysIndex]->printSolverParams();
}
/////////////////////////////////////////////////////////////////////////
//
// Perform the simulation
//
////////////////////////////////////////////////////////////////////////
#ifdef WITH_GLUT
if(visualize)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DEPTH | GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowPosition(0,0);
glutInitWindowSize(1024 ,512);
glutCreateWindow("MAIN");
glutDisplayFunc(renderSceneAll);
glutIdleFunc(renderSceneAll);
glutReshapeFunc(changeSize);
glutIgnoreKeyRepeat(0);
glutKeyboardFunc(CallBackKeyboardFunc);
glutMouseFunc(CallBackMouseFunc);
glutMotionFunc(CallBackMotionFunc);
initScene();
glutMainLoop();
}
#endif
stringstream ss_m;
ss_m << data_folder << "/" << "timing_" << atoi(argv[1]) << "_" << atoi(argv[2]) << "_" << atoi(argv[3]) << "_" << atoi(argv[4]) << "_" << atof(argv[5]) << ".txt";
string timing_file_name = ss_m.str();
ofstream ofile(timing_file_name.c_str());
// if you don't want to visualize, then output the data
int fileIndex = 0;
bool updateDone = false;
#pragma omp parallel shared(updateDone, fileIndex, workingThread, g_lock)
{
int tid = omp_get_thread_num();
while(true)
{
int loc_working_thread;
omp_set_lock(&g_lock);
loc_working_thread = workingThread;
omp_unset_lock(&g_lock);
if (tid == loc_working_thread) {
// Output POV-Ray data
if(sys[tid]->timeIndex%outputInterval==0)
{
stringstream ss;
ss << data_folder << "/data_" << fileIndex << ".dat";
sys[tid]->writeToFile(ss.str());
fileIndex++;
}
// The working thread should solve the problem
omp_set_lock(&g_lock);
cout << "SYSTEM " << tid << " UPDATE TIME STEP" << "(TIME: " << sys[tid]->time << ")" << endl;
omp_unset_lock(&g_lock);
sys[tid]->DoTimeStep();
omp_set_lock(&g_lock);
cout << "SYSTEM " << tid << " UPDATE TIME STEP DONE" << endl;
omp_unset_lock(&g_lock);
// Output timing information
ofile << sys[tid]->time << ", "
<< sys[tid]->deviceIndex << ", "
<< sys[tid]->stepTime << ", "
<< sys[tid]->stepNewtonIterations << ", "
<< sys[tid]->stepKrylovIterations << ", "
<< sys[tid]->precUpdated << " , ";
for (size_t i = 0; i < sys[tid]->stepNewtonIterations; ++i)
ofile << sys[tid]->spikeSolveTime[i] << ", " << sys[tid]->spikeNumIter[i] << ", ";
ofile << endl;
}
else if (tid == abs(1 - loc_working_thread)) {
// The non-working thread should update the preconditioner
omp_set_lock(&g_lock);
cout << " SYSTEM " << tid << " UPDATE PRECONDITIONER... " << endl;
omp_unset_lock(&g_lock);
sys[tid]->setupPreconditioner();
//sys[tid]->updatePreconditioner();
omp_set_lock(&g_lock);
updateDone = true;
cout << " PRECONDITIONER UPDATE COMPLETE (" << sys[tid]->precUpdated << ")" << endl;
workingThread = abs(1 - workingThread);
omp_unset_lock(&g_lock);
}
// If the preconditioner has been updated, or the working thread fails to solve the problem, do a switch
if (updateDone) {
#pragma omp barrier
if (sys[loc_working_thread]->time >= t_end) break;
#pragma omp single
{
cout << "SWITCH SYSTEM " << loc_working_thread << " -> " << abs(1-loc_working_thread) << "... ";
sys[loc_working_thread]->transferState(sys[abs(1-loc_working_thread)]);
updateDone = false;
cout << "SWITCH COMPLETE." << endl;
}
}
}
}
ofile.close();
return 0;
}
| be5ee13076b06c3612238fc9730810d6e5a971cb.cu | #include "include.cuh"
#include "ANCFSystem.cuh"
#include "Element.cuh"
#include "Node.cuh"
#include "Particle.cuh"
bool updateDraw = 1;
bool showSphere = 1;
// Create the system (placed outside of main so it is available to the OpenGL code)
int workingThread = 0;
const int numSystems = 2;
ANCFSystem* sys[numSystems];
#ifdef WITH_GLUT
OpenGLCamera oglcamera(camreal3(-1,1,-1),camreal3(0,0,0),camreal3(0,1,0),.01);
// OPENGL RENDERING CODE //
void changeSize(int w, int h) {
if(h == 0) {h = 1;}
float ratio = 1.0* w / h;
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glViewport(0, 0, w, h);
gluPerspective(45,ratio,.1,1000);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
gluLookAt(0.0,0.0,0.0, 0.0,0.0,-7, 0.0f,1.0f,0.0f);
}
void initScene(){
GLfloat light_position[] = { 1.0, 1.0, 1.0, 0.0 };
glClearColor (1.0, 1.0, 1.0, 0.0);
glShadeModel (GL_SMOOTH);
glEnable(GL_COLOR_MATERIAL);
glLightfv(GL_LIGHT0, GL_POSITION, light_position);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable (GL_POINT_SMOOTH);
glEnable (GL_BLEND);
glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glHint (GL_POINT_SMOOTH_HINT, GL_DONT_CARE);
}
void drawAll()
{
if(updateDraw){
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST);
glFrontFace(GL_CCW);
glCullFace(GL_BACK);
glEnable(GL_CULL_FACE);
glDepthFunc(GL_LEQUAL);
glClearDepth(1.0);
glPointSize(2);
glLoadIdentity();
oglcamera.Update();
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) {
glColor3f(0.0f,0.0f,1.0f);
if(sysIndex==1) glColor3f(1.0f,0.0f,0.0f);
for(int i=0;i<sys[sysIndex]->elements.size();i++)
{
int xiDiv = sys[sysIndex]->numContactPoints;
double xiInc = 1/(static_cast<double>(xiDiv-1));
if(showSphere)
{
//glColor3f(0.0f,0.0f,1.0f);
for(int j=0;j<xiDiv;j++)
{
glPushMatrix();
float3 position = sys[sysIndex]->getXYZPosition(i,xiInc*j);
glTranslatef(position.x,position.y,position.z);
glutSolidSphere(sys[sysIndex]->elements[i].getRadius(),10,10);
glPopMatrix();
}
}
else
{
int xiDiv = sys[sysIndex]->numContactPoints;
double xiInc = 1/(static_cast<double>(xiDiv-1));
glLineWidth(sys[sysIndex]->elements[i].getRadius()*500);
//glColor3f(0.0f,1.0f,0.0f);
glBegin(GL_LINE_STRIP);
for(int j=0;j<sys[sysIndex]->numContactPoints;j++)
{
float3 position = sys[sysIndex]->getXYZPosition(i,xiInc*j);
glVertex3f(position.x,position.y,position.z);
}
glEnd();
glFlush();
}
}
}
glutSwapBuffers();
}
}
void renderSceneAll(){
if(OGL){
//if(sys->timeIndex%10==0)
drawAll();
// Figure out the non-working thread (based on working thread)
int nonWorkingThread = 1;
if(workingThread) nonWorkingThread = 0;
// The working thread will perform the time step while the non-working thread updates the preconditioner
cout << "SYSTEM " << workingThread << " UPDATE TIME STEP" << "(TIME: " << sys[workingThread]->time << ")" << endl;
sys[workingThread]->DoTimeStep();
cout << " SYSTEM " << nonWorkingThread << " UPDATE PRECONDITIONER... " << endl;
if(!sys[nonWorkingThread]->precUpdated) sys[nonWorkingThread]->updatePreconditioner();
cout << " PRECONDITIONER UPDATE COMPLETE (" << sys[nonWorkingThread]->precUpdated << ")" << endl;
// When the preconditioner is ready, switch the jobs of the systems
if(sys[workingThread]->timeIndex%1 == 0) {
cout << "SWITCH SYSTEM " << workingThread << " -> " << abs(1-workingThread) << "... ";
sys[workingThread]->transferState(sys[nonWorkingThread]);
cout << "SWITCH COMPLETE." << endl;
workingThread = nonWorkingThread;
}
}
}
void CallBackKeyboardFunc(unsigned char key, int x, int y) {
switch (key) {
case 'w':
oglcamera.Forward();
break;
case 's':
oglcamera.Back();
break;
case 'd':
oglcamera.Right();
break;
case 'a':
oglcamera.Left();
break;
case 'q':
oglcamera.Up();
break;
case 'e':
oglcamera.Down();
break;
}
}
void CallBackMouseFunc(int button, int state, int x, int y) {
oglcamera.SetPos(button, state, x, y);
}
void CallBackMotionFunc(int x, int y) {
oglcamera.Move2D(x, y);
}
#endif
// END OPENGL RENDERING CODE //
int main(int argc, char** argv)
{
/////////////////////////////////////////////////////////////////////////
//
// Set up the system
//
////////////////////////////////////////////////////////////////////////
// command line arguments
// ImplicitBeamsGPU <numPartitions> <numBeamsPerSide> <solverType> <usePreconditioning> <elasticModulus> <dataFolder>
// solverType: (0) BiCGStab, (1) BiCGStab1, (2) BiCGStab2, (3) MinRes
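  // Example invocation (illustrative values only, not taken from project documentation):
  //   ./ImplicitBeamsGPU 10 20 2 1 2e7 ./output
  //   -> 10 partitions, 20 beams per side, BiCGStab2 solver, preconditioning enabled,
  //      elastic modulus 2e7, data files written under ./output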
#ifdef WITH_GLUT
  bool visualize = false;  // set to true to enable the OpenGL visualization loop below
#endif
double hh = 1e-3;
int numElementsPerSide = 4;
double E = 2e7;
double t_end = 5.0;
int precUpdateInterval = -1;
float precMaxKrylov = -1;
int outputInterval = 10;
double length = 1;
double r = .02;
double rho = 2200;
double nu = .3;
string data_folder = "./garbage";
// Set up variables for multi-GPU
omp_set_num_threads(numSystems);
workingThread = 0;
omp_lock_t g_lock;
omp_init_lock(&g_lock);
// Check the number of devices
int deviceCount;
cudaGetDeviceCount(&deviceCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", deviceCount);
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) {
if(deviceCount>1) {
sys[sysIndex] = new ANCFSystem(sysIndex);
} else {
sys[sysIndex] = new ANCFSystem(); // Create both systems on the same device
}
sys[sysIndex]->setTimeStep(hh, 1e-10);
sys[sysIndex]->setMaxNewtonIterations(20);
sys[sysIndex]->setMaxKrylovIterations(5000);
sys[sysIndex]->numContactPoints = 30;
sys[sysIndex]->preconditionerUpdateModulus = 0; // Never perform an automatic update
sys[sysIndex]->setNumPartitions(1);
sys[sysIndex]->setSolverType(2);
sys[sysIndex]->setPrecondType(0);
sys[sysIndex]->fullJacobian = 1;
if(argc>1) {
sys[sysIndex]->setNumPartitions((int)atoi(argv[1]));
numElementsPerSide = atoi(argv[2]);
sys[sysIndex]->setSolverType((int)atoi(argv[3]));
sys[sysIndex]->setPrecondType(atoi(argv[4]));
if(atoi(argv[4])) {
sys[sysIndex]->preconditionerUpdateModulus = 0; // Never perform an automatic update
sys[sysIndex]->preconditionerMaxKrylovIterations = precMaxKrylov;
}
E = atof(argv[5]);
data_folder = argv[6];
}
}
/////////////////////////////////////////////////////////////////////////
//
// Add elements to system
//
////////////////////////////////////////////////////////////////////////
Element element;
int k = 0;
// Add elements in x-direction
for (int j = 0; j < numElementsPerSide+1; j++) {
for (int i = 0; i < numElementsPerSide; i++) {
element = Element(Node(i*length, 0, j*length, 1, 0, 0),
Node((i+1)*length, 0, j*length, 1, 0, 0),
r, nu, E, rho);
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) sys[sysIndex]->addElement(&element);
k++;
if(k%100==0) printf("Elements %d\n",k);
}
}
// Add elements in z-direction
for (int j = 0; j < numElementsPerSide+1; j++) {
for (int i = 0; i < numElementsPerSide; i++) {
element = Element(Node(j*length, 0, i*length, 0, 0, 1),
Node(j*length, 0, (i+1)*length, 0, 0, 1),
r, nu, E, rho);
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) sys[sysIndex]->addElement(&element);
k++;
if(k%100==0) printf("Elements %d\n",k);
}
}
/////////////////////////////////////////////////////////////////////////
//
// Add constraints to system
//
////////////////////////////////////////////////////////////////////////
// Fix corners to ground
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) {
sys[sysIndex]->addConstraint_AbsoluteSpherical(sys[sysIndex]->elements[0], 0);
sys[sysIndex]->addConstraint_AbsoluteSpherical(sys[sysIndex]->elements[2*numElementsPerSide*(numElementsPerSide+1)-numElementsPerSide], 0);
sys[sysIndex]->addConstraint_AbsoluteSpherical(sys[sysIndex]->elements[numElementsPerSide*(numElementsPerSide+1)-numElementsPerSide], 0);
sys[sysIndex]->addConstraint_AbsoluteSpherical(sys[sysIndex]->elements[2*numElementsPerSide*(numElementsPerSide+1)-1], 1);
sys[sysIndex]->addConstraint_AbsoluteSpherical(sys[sysIndex]->elements[numElementsPerSide*(numElementsPerSide+1)-1], 1);
}
// Constrain x-strands together
for(int j=0; j < numElementsPerSide+1; j++)
{
for(int i=0; i < numElementsPerSide-1; i++)
{
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) sys[sysIndex]->addConstraint_RelativeFixed(
sys[sysIndex]->elements[i+j*numElementsPerSide], 1,
sys[sysIndex]->elements[i+1+j*numElementsPerSide], 0);
}
}
// Constrain z-strands together
int offset = numElementsPerSide*(numElementsPerSide+1);
for(int j=0; j < numElementsPerSide+1; j++)
{
for(int i=0; i < numElementsPerSide-1; i++)
{
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) sys[sysIndex]->addConstraint_RelativeFixed(
sys[sysIndex]->elements[i+offset+j*numElementsPerSide], 1,
sys[sysIndex]->elements[i+offset+1+j*numElementsPerSide], 0);
}
}
// Constrain cross-streams together
for(int j=0; j < numElementsPerSide; j++)
{
for(int i=0; i < numElementsPerSide; i++)
{
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) sys[sysIndex]->addConstraint_RelativeSpherical(
sys[sysIndex]->elements[i*numElementsPerSide+j], 0,
sys[sysIndex]->elements[offset+i+j*numElementsPerSide], 0);
}
}
for(int i=0; i < numElementsPerSide; i++)
{
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) sys[sysIndex]->addConstraint_RelativeSpherical(
sys[sysIndex]->elements[numElementsPerSide-1+numElementsPerSide*i], 1,
sys[sysIndex]->elements[2*offset-numElementsPerSide+i], 0);
}
for(int i=0; i < numElementsPerSide; i++)
{
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) sys[sysIndex]->addConstraint_RelativeSpherical(
sys[sysIndex]->elements[numElementsPerSide*(numElementsPerSide+1)+numElementsPerSide-1+numElementsPerSide*i], 1,
sys[sysIndex]->elements[numElementsPerSide*numElementsPerSide+i], 0);
}
/////////////////////////////////////////////////////////////////////////
//
// Finalize the system
//
////////////////////////////////////////////////////////////////////////
for(int sysIndex = 0; sysIndex < numSystems; sysIndex++) {
printf("%d, %d, %d\n",sys[sysIndex]->elements.size(),sys[sysIndex]->constraints.size(),12*sys[sysIndex]->elements.size()+sys[sysIndex]->constraints.size());
sys[sysIndex]->initializeSystem();
printf("System[%d] initialized!\n",sysIndex);
sys[sysIndex]->printSolverParams();
}
/////////////////////////////////////////////////////////////////////////
//
// Perform the simulation
//
////////////////////////////////////////////////////////////////////////
#ifdef WITH_GLUT
if(visualize)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DEPTH | GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowPosition(0,0);
glutInitWindowSize(1024 ,512);
glutCreateWindow("MAIN");
glutDisplayFunc(renderSceneAll);
glutIdleFunc(renderSceneAll);
glutReshapeFunc(changeSize);
glutIgnoreKeyRepeat(0);
glutKeyboardFunc(CallBackKeyboardFunc);
glutMouseFunc(CallBackMouseFunc);
glutMotionFunc(CallBackMotionFunc);
initScene();
glutMainLoop();
}
#endif
stringstream ss_m;
ss_m << data_folder << "/" << "timing_" << atoi(argv[1]) << "_" << atoi(argv[2]) << "_" << atoi(argv[3]) << "_" << atoi(argv[4]) << "_" << atof(argv[5]) << ".txt";
string timing_file_name = ss_m.str();
ofstream ofile(timing_file_name.c_str());
// if you don't want to visualize, then output the data
int fileIndex = 0;
bool updateDone = false;
#pragma omp parallel shared(updateDone, fileIndex, workingThread, g_lock)
{
int tid = omp_get_thread_num();
while(true)
{
int loc_working_thread;
omp_set_lock(&g_lock);
loc_working_thread = workingThread;
omp_unset_lock(&g_lock);
if (tid == loc_working_thread) {
// Output POV-Ray data
if(sys[tid]->timeIndex%outputInterval==0)
{
stringstream ss;
ss << data_folder << "/data_" << fileIndex << ".dat";
sys[tid]->writeToFile(ss.str());
fileIndex++;
}
// The working thread should solve the problem
omp_set_lock(&g_lock);
cout << "SYSTEM " << tid << " UPDATE TIME STEP" << "(TIME: " << sys[tid]->time << ")" << endl;
omp_unset_lock(&g_lock);
sys[tid]->DoTimeStep();
omp_set_lock(&g_lock);
cout << "SYSTEM " << tid << " UPDATE TIME STEP DONE" << endl;
omp_unset_lock(&g_lock);
// Output timing information
ofile << sys[tid]->time << ", "
<< sys[tid]->deviceIndex << ", "
<< sys[tid]->stepTime << ", "
<< sys[tid]->stepNewtonIterations << ", "
<< sys[tid]->stepKrylovIterations << ", "
<< sys[tid]->precUpdated << " , ";
for (size_t i = 0; i < sys[tid]->stepNewtonIterations; ++i)
ofile << sys[tid]->spikeSolveTime[i] << ", " << sys[tid]->spikeNumIter[i] << ", ";
ofile << endl;
}
else if (tid == abs(1 - loc_working_thread)) {
// The non-working thread should update the preconditioner
omp_set_lock(&g_lock);
cout << " SYSTEM " << tid << " UPDATE PRECONDITIONER... " << endl;
omp_unset_lock(&g_lock);
sys[tid]->setupPreconditioner();
//sys[tid]->updatePreconditioner();
omp_set_lock(&g_lock);
updateDone = true;
cout << " PRECONDITIONER UPDATE COMPLETE (" << sys[tid]->precUpdated << ")" << endl;
workingThread = abs(1 - workingThread);
omp_unset_lock(&g_lock);
}
// If the preconditioner has been updated, or the working thread fails to solve the problem, do a switch
if (updateDone) {
#pragma omp barrier
if (sys[loc_working_thread]->time >= t_end) break;
#pragma omp single
{
cout << "SWITCH SYSTEM " << loc_working_thread << " -> " << abs(1-loc_working_thread) << "... ";
sys[loc_working_thread]->transferState(sys[abs(1-loc_working_thread)]);
updateDone = false;
cout << "SWITCH COMPLETE." << endl;
}
}
}
}
ofile.close();
return 0;
}
|
988dcbc740f88222a101726f12ce826034d34367.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include <omp.h>
#include <math.h>
#define Zad5
#ifdef Zad1
int coresPerSM(hipDeviceProp_t prop) {
typedef struct {
		int SM; // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{0x30, 192},
{0x32, 192},
{0x35, 192},
{0x37, 192},
{0x50, 128},
{0x52, 128},
{0x53, 128},
{0x60, 64},
{0x61, 128},
{0x62, 128},
{0x70, 64},
{0x72, 64},
{0x75, 64},
{-1, -1} };
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((prop.major << 4) + prop.minor)) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
	// If the SM version is not in the table, fall back to the last known entry
	// so the program can still run
printf(
"MapSMtoCores for SM %d.%d is undefined."
" Default to use %d Cores/SM\n",
prop.major, prop.minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
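// Worked example of the 0xMm encoding above (illustrative): a device of compute
// capability 7.5 gives (prop.major << 4) + prop.minor = (7 << 4) + 5 = 0x75, which
// the table maps to 64 cores per SM.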
int main()
{
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
printf("Nazwa urzadzenia: %s\n", prop.name);
printf("Czestotliwosc zegara [KHz]: %d\n", prop.memoryClockRate);
printf("Przepustowosc pamieci [bity]: %d\n", prop.memoryBusWidth);
printf("Compute Capability: %d\n", coresPerSM(prop));
printf("Liczba multiprocesorow: %d\n", prop.multiProcessorCount);
printf("Liczba rdzeni: %d\n", (coresPerSM(prop)) * prop.multiProcessorCount);
hipSetDevice(0);
hipDeviceReset();
return 0;
}
#endif
#ifdef Zad2
int main()
{
hipSetDevice(0);
char *charmib1, *charmib8, *charmib96, *charmib256, *a1, *a8, *a96, *a256;
int *intmib1, *intmib8, *intmib96, *intmib256, *b1, *b8, *b96, *b256;
float *floatmib1, *floatmib8, *floatmib96, *floatmib256, *c1, *c8, *c96, *c256;
double *doublemib1, *doublemib8, *doublemib96, *doublemib256, *d1, *d8, *d96, *d256;
charmib1 = new char[1024 * 1024];
charmib8 = new char[8 * 1024 * 1024];
charmib96 = new char[96 * 1024 * 1024];
charmib256 = new char[256 * 1024 * 1024];
intmib1 = new int[1024 * 1024 / 4];
intmib8 = new int[2 * 1024 * 1024];
intmib96 = new int[24 * 1024 * 1024];
intmib256 = new int[64 * 1024 * 1024];
floatmib1 = new float[1024 * 1024 / 4];
floatmib8 = new float[2 * 1024 * 1024];
floatmib96 = new float[24 * 1024 * 1024];
floatmib256 = new float[64 * 1024 * 1024];
doublemib1 = new double[1024 * 1024 / 8];
doublemib8 = new double[1024 * 1024];
doublemib96 = new double[12 * 1024 * 1024];
doublemib256 = new double[32 * 1024 * 1024];
hipMalloc(&a1, 1024 * 1024 * sizeof(char));
hipMalloc(&a8, 1024 * 1024 * 8 * sizeof(char));
hipMalloc(&a96, 1024 * 1024 * 96 * sizeof(char));
hipMalloc(&a256, 1024 * 1024 * 256 * sizeof(char));
hipMalloc(&b1, 1024 * 1024 * sizeof(int) / 4);
hipMalloc(&b8, 1024 * 1024 * 2 * sizeof(int));
hipMalloc(&b96, 1024 * 1024 * 24 * sizeof(int));
hipMalloc(&b256, 1024 * 1024 * 64 * sizeof(int));
hipMalloc(&c1, 1024 * 1024 * sizeof(float) / 4);
hipMalloc(&c8, 1024 * 1024 * 2 * sizeof(float));
hipMalloc(&c96, 1024 * 1024 * 24 * sizeof(float));
hipMalloc(&c256, 1024 * 1024 * 64 * sizeof(float));
hipMalloc(&d1, 1024 * 1024 * sizeof(double) / 8);
hipMalloc(&d8, 1024 * 1024 * sizeof(double));
hipMalloc(&d96, 1024 * 1024 * 12 * sizeof(double));
hipMalloc(&d256, 1024 * 1024 * 32 * sizeof(double));
hipEvent_t start, stop;
float czas;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipMemcpy(a1, charmib1, 1024 * 1024 * sizeof(char), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (char, 1 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(a8, charmib8, 1024 * 1024 * 8 *sizeof(char), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (char, 8 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(a96, charmib96, 1024 * 1024 * 96 *sizeof(char), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (char, 96 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(a256, charmib256, 1024 * 1024 * 256 * sizeof(char), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (char, 256 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(b1, intmib1, 1024 * 1024 * sizeof(int) / 4, hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (int, 1 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(b8, intmib8, 1024 * 1024 * 2 * sizeof(int), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (int, 8 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(b96, intmib96, 1024 * 1024 * 24 * sizeof(int), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (int, 96 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(b256, intmib256, 1024 * 1024 * 64 * sizeof(int), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (int, 256 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(c1, floatmib1, 1024 * 1024 * sizeof(float) / 4, hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (float, 1 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(c8, floatmib8, 1024 * 1024 * 2 * sizeof(float), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (float, 8 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(c96, floatmib96, 1024 * 1024 * 24 * sizeof(float), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (float, 96 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(c256, floatmib256, 1024 * 1024 * 64 * sizeof(float), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (float, 256 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(d1, doublemib1, 1024 * 1024 * sizeof(double) / 8, hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (double, 1 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(d8, doublemib8, 1024 * 1024 * sizeof(double), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (double, 8 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(d96, doublemib96, 1024 * 1024 * 12 * sizeof(double), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (double, 96 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(d256, doublemib256, 1024 * 1024 * 32 * sizeof(double), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (double, 256 MiB) [ms]: %f\n\n", czas);
hipEventRecord(start, 0);
hipMemcpy(charmib1, a1, 1024 * 1024 * sizeof(char), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (char, 1 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(charmib8, a8, 1024 * 1024 * 8 * sizeof(char), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (char, 8 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
	hipMemcpy(charmib96, a96, 1024 * 1024 * 96 * sizeof(char), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (char, 96 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(charmib256, a256, 1024 * 1024 * 256 * sizeof(char), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (char, 256 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(intmib1, b1, 1024 * 1024 * sizeof(int) / 4, hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (int, 1 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(intmib8, b8, 1024 * 1024 * 2 * sizeof(int), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (int, 8 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(intmib96, b96, 1024 * 1024 * 24 * sizeof(int), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (int, 96 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(intmib256, b256, 1024 * 1024 * 64 * sizeof(int), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (int, 256 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(floatmib1, c1, 1024 * 1024 * sizeof(float) / 4, hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (float, 1 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(floatmib8, c8, 1024 * 1024 * 2 * sizeof(float), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (float, 8 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(floatmib96, c96, 1024 * 1024 * 24 * sizeof(float), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (float, 96 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(floatmib256, c256, 1024 * 1024 * 64 * sizeof(float), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (float, 256 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(doublemib1, d1, 1024 * 1024 * sizeof(double) / 8, hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (double, 1 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(doublemib8, d8, 1024 * 1024 * sizeof(double), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (double, 8 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(doublemib96, d96, 1024 * 1024 * 12 * sizeof(double), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (double, 96 MiB) [ms]: %f\n", czas);
hipEventRecord(start, 0);
hipMemcpy(doublemib256, d256, 1024 * 1024 * 32 * sizeof(double), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (double, 256 MiB) [ms]: %f\n", czas);
delete[] charmib1;
delete[] charmib8;
delete[] charmib96;
delete[] charmib256;
delete[] intmib1;
delete[] intmib8;
delete[] intmib96;
delete[] intmib256;
delete[] floatmib1;
delete[] floatmib8;
delete[] floatmib96;
delete[] floatmib256;
delete[] doublemib1;
delete[] doublemib8;
delete[] doublemib96;
delete[] doublemib256;
hipFree(a1);
hipFree(a8);
hipFree(a96);
hipFree(a256);
hipFree(b1);
hipFree(b8);
hipFree(b96);
hipFree(b256);
hipFree(c1);
hipFree(c8);
hipFree(c96);
hipFree(c256);
hipFree(d1);
hipFree(d8);
hipFree(d96);
hipFree(d256);
hipDeviceReset();
return 0;
}
#endif
#ifdef Zad3
__global__ void kernelMnozenie(int *a, int *b, int *c)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
c[i] = a[i] * b[i];
}
__global__ void kernelDodawanie(int *a, int *b, int *c)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
c[i] = a[i] + b[i];
}
__global__ void kernelPotegowanie(int *a, int *b, int *c)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int wynik = 1;
for (int j = 0; j < b[i]; j++)
{
wynik *= a[i];
}
c[i] = wynik;
}
__global__ void kernelMnozenie(float *a, float *b, float *c)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
c[i] = a[i] * b[i];
}
__global__ void kernelDodawanie(float *a, float *b, float *c)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
c[i] = a[i] + b[i];
}
__global__ void kernelPotegowanie(float *a, float *b, float *c)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
float wynik = 1;
for (float j = 0; j < b[i]; j++)
{
wynik *= a[i];
}
c[i] = wynik;
}
__global__ void kernelMnozenie(double *a, double *b, double *c)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
c[i] = a[i] * b[i];
}
__global__ void kernelDodawanie(double *a, double *b, double *c)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
c[i] = a[i] + b[i];
}
__global__ void kernelPotegowanie(double *a, double *b, double *c)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
double wynik = 1;
for (double j = 0; j < b[i]; j++)
{
wynik *= a[i];
}
c[i] = wynik;
}
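// Note: the kernels above contain no bounds check, so the launch configuration in
// main() must cover each buffer exactly; this holds because every buffer length used
// below is a multiple of the block size (1024). The power kernels cost O(b[i])
// multiplications per element. A possible alternative for the integer case is
// exponentiation by squaring - the helper below is only an illustrative sketch
// (it is not called anywhere in this file) and assumes non-negative exponents.
__device__ int potegowanieSzybkie(int podstawa, int wykladnik)
{
int wynik = 1;
while (wykladnik > 0)
{
if (wykladnik & 1) // odd exponent: multiply the result by the current base
{
wynik *= podstawa;
}
podstawa *= podstawa; // square the base
wykladnik >>= 1; // halve the exponent
}
return wynik;
}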
void dodawanieCPU(int *a, int *b, int *c, int rozmiar);
void mnozenieCPU(int *a, int *b, int *c, int rozmiar);
void potegowanieCPU(int *a, int *b, int *c, int rozmiar);
void dodawanieCPU(float *a, float *b, float *c, int rozmiar);
void mnozenieCPU(float *a, float *b, float *c, int rozmiar);
void potegowanieCPU(float *a, float *b, float *c, int rozmiar);
void dodawanieCPU(double *a, double *b, double *c, int rozmiar);
void mnozenieCPU(double *a, double *b, double *c, int rozmiar);
void potegowanieCPU(double *a, double *b, double *c, int rozmiar);
int main()
{
hipSetDevice(0);
/*const int rozmiar = 9;
int a[rozmiar] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
int b[rozmiar] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
int c[rozmiar] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
int *dev_a;
int *dev_b;
int *dev_c;
hipMalloc(&dev_a, rozmiar * sizeof(int));
hipMalloc(&dev_b, rozmiar * sizeof(int));
hipMalloc(&dev_c, rozmiar * sizeof(int));
hipMemcpy(dev_a, a, rozmiar * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, rozmiar * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_c, c, rozmiar * sizeof(int), hipMemcpyHostToDevice);
kernelDodawanie << <1, rozmiar >> > (dev_a, dev_b, dev_c);
hipDeviceSynchronize();
hipMemcpy(c, dev_c, rozmiar * sizeof(int), hipMemcpyDeviceToHost);
printf("\nDodawanie GPU\n");
for (int i = 0; i < rozmiar; i++)
{
printf("%d + %d = %d\n", a[i], b[i], c[i]);
}
kernelMnozenie << <1, rozmiar >> > (dev_a, dev_b, dev_c);
hipMemcpy(c, dev_c, rozmiar * sizeof(int), hipMemcpyDeviceToHost);
printf("\nMnozenie GPU\n");
for (int i = 0; i < rozmiar; i++)
{
printf("%d * %d = %d\n", a[i], b[i], c[i]);
}
kernelPotegowanie << <1, rozmiar >> > (dev_a, dev_b, dev_c);
hipMemcpy(c, dev_c, rozmiar * sizeof(int), hipMemcpyDeviceToHost);
printf("\nPotegowanie GPU\n");
for (int i = 0; i < rozmiar; i++)
{
printf("%d ^ %d = %d\n", a[i], b[i], c[i]);
}
dodawanieCPU(a, b, c, rozmiar);
printf("\nDodawanie CPU\n");
for (int i = 0; i < rozmiar; i++)
{
printf("%d + %d = %d\n", a[i], b[i], c[i]);
}
mnozenieCPU(a, b, c, rozmiar);
printf("\nMnozenie CPU\n");
for (int i = 0; i < rozmiar; i++)
{
printf("%d * %d = %d\n", a[i], b[i], c[i]);
}
potegowanieCPU(a, b, c, rozmiar);
printf("\nPotegowanie CPU\n");
for (int i = 0; i < rozmiar; i++)
{
printf("%d ^ %d = %d\n", a[i], b[i], c[i]);
}
*/
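// The block commented out above is a small 9-element correctness demo kept for
// reference; the benchmark proper starts here. Buffer-name suffixes give the
// intended size in MiB (e.g. aint8 = 2M ints = 8 MiB, adouble16 = 2M doubles = 16 MiB).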
const int rozmiar = 1024 * 1024;
int liczbaBlokow;
int rozmiarBloku = 1024;
int *aint1 = new int[rozmiar / 4];
int *bint1 = new int[rozmiar / 4];
int *cint1 = new int[rozmiar / 4];
int *aint4 = new int[rozmiar];
int *bint4 = new int[rozmiar];
int *cint4 = new int[rozmiar];
int *aint8 = new int[rozmiar * 2];
int *bint8 = new int[rozmiar * 2];
int *cint8 = new int[rozmiar * 2];
int *aint16 = new int[rozmiar * 4];
int *bint16 = new int[rozmiar * 4];
int *cint16 = new int[rozmiar * 4];
float *afloat1 = new float[rozmiar / 4];
float *bfloat1 = new float[rozmiar / 4];
float *cfloat1 = new float[rozmiar / 4];
float *afloat4 = new float[rozmiar];
float *bfloat4 = new float[rozmiar];
float *cfloat4 = new float[rozmiar];
float *afloat8 = new float[rozmiar * 2];
float *bfloat8 = new float[rozmiar * 2];
float *cfloat8 = new float[rozmiar * 2];
float *afloat16 = new float[rozmiar * 4];
float *bfloat16 = new float[rozmiar * 4];
float *cfloat16 = new float[rozmiar * 4];
double *adouble1 = new double[rozmiar / 8];
double *bdouble1 = new double[rozmiar / 8];
double *cdouble1 = new double[rozmiar / 8];
double *adouble4 = new double[rozmiar / 2];
double *bdouble4 = new double[rozmiar / 2];
double *cdouble4 = new double[rozmiar / 2];
double *adouble8 = new double[rozmiar];
double *bdouble8 = new double[rozmiar];
double *cdouble8 = new double[rozmiar];
double *adouble16 = new double[rozmiar * 2];
double *bdouble16 = new double[rozmiar * 2];
double *cdouble16 = new double[rozmiar * 2];
int *dev_aint1;
int *dev_aint4;
int *dev_aint8;
int *dev_aint16;
int *dev_bint1;
int *dev_bint4;
int *dev_bint8;
int *dev_bint16;
int *dev_cint1;
int *dev_cint4;
int *dev_cint8;
int *dev_cint16;
float *dev_afloat1;
float *dev_afloat4;
float *dev_afloat8;
float *dev_afloat16;
float *dev_bfloat1;
float *dev_bfloat4;
float *dev_bfloat8;
float *dev_bfloat16;
float *dev_cfloat1;
float *dev_cfloat4;
float *dev_cfloat8;
float *dev_cfloat16;
double *dev_adouble1;
double *dev_adouble4;
double *dev_adouble8;
double *dev_adouble16;
double *dev_bdouble1;
double *dev_bdouble4;
double *dev_bdouble8;
double *dev_bdouble16;
double *dev_cdouble1;
double *dev_cdouble4;
double *dev_cdouble8;
double *dev_cdouble16;
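// Device-side counterparts of the host buffers above; the hipMalloc/hipMemcpy
// calls below mirror the host sizes exactly.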
hipMalloc(&dev_aint1, rozmiar * sizeof(int) / 4);
hipMalloc(&dev_aint4, rozmiar * sizeof(int));
hipMalloc(&dev_aint8, rozmiar * sizeof(int) * 2);
hipMalloc(&dev_aint16, rozmiar * sizeof(int) * 4);
hipMalloc(&dev_bint1, rozmiar * sizeof(int) / 4);
hipMalloc(&dev_bint4, rozmiar * sizeof(int));
hipMalloc(&dev_bint8, rozmiar * sizeof(int) * 2);
hipMalloc(&dev_bint16, rozmiar * sizeof(int) * 4);
hipMalloc(&dev_cint1, rozmiar * sizeof(int) / 4);
hipMalloc(&dev_cint4, rozmiar * sizeof(int));
hipMalloc(&dev_cint8, rozmiar * sizeof(int) * 2);
hipMalloc(&dev_cint16, rozmiar * sizeof(int) * 4);
hipMalloc(&dev_afloat1, rozmiar * sizeof(float) / 4);
hipMalloc(&dev_afloat4, rozmiar * sizeof(float));
hipMalloc(&dev_afloat8, rozmiar * sizeof(float) * 2);
hipMalloc(&dev_afloat16, rozmiar * sizeof(float) * 4);
hipMalloc(&dev_bfloat1, rozmiar * sizeof(float) / 4);
hipMalloc(&dev_bfloat4, rozmiar * sizeof(float));
hipMalloc(&dev_bfloat8, rozmiar * sizeof(float) * 2);
hipMalloc(&dev_bfloat16, rozmiar * sizeof(float) * 4);
hipMalloc(&dev_cfloat1, rozmiar * sizeof(float) / 4);
hipMalloc(&dev_cfloat4, rozmiar * sizeof(float));
hipMalloc(&dev_cfloat8, rozmiar * sizeof(float) * 2);
hipMalloc(&dev_cfloat16, rozmiar * sizeof(float) * 4);
hipMalloc(&dev_adouble1, rozmiar * sizeof(double) / 8);
hipMalloc(&dev_adouble4, rozmiar * sizeof(double) / 2);
hipMalloc(&dev_adouble8, rozmiar * sizeof(double));
hipMalloc(&dev_adouble16, rozmiar * sizeof(double) * 2);
hipMalloc(&dev_bdouble1, rozmiar * sizeof(double) / 8);
hipMalloc(&dev_bdouble4, rozmiar * sizeof(double) / 2);
hipMalloc(&dev_bdouble8, rozmiar * sizeof(double));
hipMalloc(&dev_bdouble16, rozmiar * sizeof(double) * 2);
hipMalloc(&dev_cdouble1, rozmiar * sizeof(double) / 8);
hipMalloc(&dev_cdouble4, rozmiar * sizeof(double) / 2);
hipMalloc(&dev_cdouble8, rozmiar * sizeof(double));
hipMalloc(&dev_cdouble16, rozmiar * sizeof(double) * 2);
hipMemcpy(dev_aint1, aint1, rozmiar * sizeof(int) / 4, hipMemcpyHostToDevice);
hipMemcpy(dev_aint4, aint4, rozmiar * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_aint8, aint8, rozmiar * sizeof(int) * 2, hipMemcpyHostToDevice);
hipMemcpy(dev_aint16, aint16, rozmiar * sizeof(int) * 4, hipMemcpyHostToDevice);
hipMemcpy(dev_bint1, bint1, rozmiar * sizeof(int) / 4, hipMemcpyHostToDevice);
hipMemcpy(dev_bint4, bint4, rozmiar * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_bint8, bint8, rozmiar * sizeof(int) * 2, hipMemcpyHostToDevice);
hipMemcpy(dev_bint16, bint16, rozmiar * sizeof(int) * 4, hipMemcpyHostToDevice);
hipMemcpy(dev_cint1, cint1, rozmiar * sizeof(int) / 4, hipMemcpyHostToDevice);
hipMemcpy(dev_cint4, cint4, rozmiar * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_cint8, cint8, rozmiar * sizeof(int) * 2, hipMemcpyHostToDevice);
hipMemcpy(dev_cint16, cint16, rozmiar * sizeof(int) * 4, hipMemcpyHostToDevice);
hipMemcpy(dev_afloat1, afloat1, rozmiar * sizeof(float) / 4, hipMemcpyHostToDevice);
hipMemcpy(dev_afloat4, afloat4, rozmiar * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_afloat8, afloat8, rozmiar * sizeof(float) * 2, hipMemcpyHostToDevice);
hipMemcpy(dev_afloat16, afloat16, rozmiar * sizeof(float) * 4, hipMemcpyHostToDevice);
hipMemcpy(dev_bfloat1, bfloat1, rozmiar * sizeof(float) / 4, hipMemcpyHostToDevice);
hipMemcpy(dev_bfloat4, bfloat4, rozmiar * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_bfloat8, bfloat8, rozmiar * sizeof(float) * 2, hipMemcpyHostToDevice);
hipMemcpy(dev_bfloat16, bfloat16, rozmiar * sizeof(float) * 4, hipMemcpyHostToDevice);
hipMemcpy(dev_cfloat1, cfloat1, rozmiar * sizeof(float) / 4, hipMemcpyHostToDevice);
hipMemcpy(dev_cfloat4, cfloat4, rozmiar * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_cfloat8, cfloat8, rozmiar * sizeof(float) * 2, hipMemcpyHostToDevice);
hipMemcpy(dev_cfloat16, cfloat16, rozmiar * sizeof(float) * 4, hipMemcpyHostToDevice);
hipMemcpy(dev_adouble1, adouble1, rozmiar * sizeof(double) / 8, hipMemcpyHostToDevice);
hipMemcpy(dev_adouble4, adouble4, rozmiar * sizeof(double) / 2, hipMemcpyHostToDevice);
hipMemcpy(dev_adouble8, adouble8, rozmiar * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_adouble16, adouble16, rozmiar * sizeof(double) * 2, hipMemcpyHostToDevice);
hipMemcpy(dev_bdouble1, bdouble1, rozmiar * sizeof(double) / 8, hipMemcpyHostToDevice);
hipMemcpy(dev_bdouble4, bdouble4, rozmiar * sizeof(double) / 2, hipMemcpyHostToDevice);
hipMemcpy(dev_bdouble8, bdouble8, rozmiar * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_bdouble16, bdouble16, rozmiar * sizeof(double) * 2, hipMemcpyHostToDevice);
hipMemcpy(dev_cdouble1, cdouble1, rozmiar * sizeof(double) / 8, hipMemcpyHostToDevice);
hipMemcpy(dev_cdouble4, cdouble4, rozmiar * sizeof(double) / 2, hipMemcpyHostToDevice);
hipMemcpy(dev_cdouble8, cdouble8, rozmiar * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_cdouble16, cdouble16, rozmiar * sizeof(double) * 2, hipMemcpyHostToDevice);
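// Each measurement below times only the kernel launch (events recorded immediately
// before and after it); the D2H copy of the result is done after the elapsed time
// is read, so transfer cost is excluded. A fresh event pair is created before every
// measurement without destroying the previous one - a single pair created once
// would be sufficient.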
float czasGPU;
hipEvent_t startGPU, stopGPU;
double startCPU, stopCPU;
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 4 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_aint1, dev_bint1, dev_cint1);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cint1, dev_cint1, rozmiar * sizeof(int) / 4, hipMemcpyDeviceToHost);
printf("Czas dodawania GPU (int, 1MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_aint4, dev_bint4, dev_cint4);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cint4, dev_cint4, rozmiar * sizeof(int), hipMemcpyDeviceToHost);
printf("Czas dodawania GPU (int, 4MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 2 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_aint8, dev_bint8, dev_cint8);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cint8, dev_cint8, rozmiar * sizeof(int) * 2, hipMemcpyDeviceToHost);
printf("Czas dodawania GPU (int, 8MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 4 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_aint16, dev_bint16, dev_cint16);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cint16, dev_cint16, rozmiar * sizeof(int) * 4, hipMemcpyDeviceToHost);
printf("Czas dodawania GPU (int, 16MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 4 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat1, dev_bfloat1, dev_cfloat1);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cfloat1, dev_cfloat1, rozmiar * sizeof(float) / 4, hipMemcpyDeviceToHost);
printf("Czas dodawania GPU (float, 1MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat4, dev_bfloat4, dev_cfloat4);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cfloat4, dev_cfloat4, rozmiar * sizeof(float), hipMemcpyDeviceToHost);
printf("Czas dodawania GPU (float, 4MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 2 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat8, dev_bfloat8, dev_cfloat8);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cfloat8, dev_cfloat8, rozmiar * sizeof(float) * 2, hipMemcpyDeviceToHost);
printf("Czas dodawania GPU (float, 8MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 4 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat16, dev_bfloat16, dev_cfloat16);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cfloat16, dev_cfloat16, rozmiar * sizeof(float) * 4, hipMemcpyDeviceToHost);
printf("Czas dodawania GPU (float, 16MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 8 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble1, dev_bdouble1, dev_cdouble1);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cdouble1, dev_cdouble1, rozmiar * sizeof(double) / 8, hipMemcpyDeviceToHost);
printf("Czas dodawania GPU (double, 1MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 2 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble4, dev_bdouble4, dev_cdouble4);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cdouble4, dev_cdouble4, rozmiar * sizeof(double) / 2, hipMemcpyDeviceToHost);
printf("Czas dodawania GPU (double, 4MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble8, dev_bdouble8, dev_cdouble8);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cdouble8, dev_cdouble8, rozmiar * sizeof(double), hipMemcpyDeviceToHost);
printf("Czas dodawania GPU (double, 8MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 2 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble16, dev_bdouble16, dev_cdouble16);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cdouble16, dev_cdouble16, rozmiar * sizeof(double) * 2, hipMemcpyDeviceToHost);
printf("Czas dodawania GPU (double, 16MiB) [ms]: %f\n\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 4 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_aint1, dev_bint1, dev_cint1);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cint1, dev_cint1, rozmiar * sizeof(int) / 4, hipMemcpyDeviceToHost);
printf("Czas mnozenia GPU (int, 1MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_aint4, dev_bint4, dev_cint4);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cint4, dev_cint4, rozmiar * sizeof(int), hipMemcpyDeviceToHost);
printf("Czas mnozenia GPU (int, 4MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 2 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_aint8, dev_bint8, dev_cint8);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cint8, dev_cint8, rozmiar * sizeof(int) * 2, hipMemcpyDeviceToHost);
printf("Czas mnozenia GPU (int, 8MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 4 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_aint16, dev_bint16, dev_cint16);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cint16, dev_cint16, rozmiar * sizeof(int) * 4, hipMemcpyDeviceToHost);
printf("Czas mnozenia GPU (int, 16MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 4 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat1, dev_bfloat1, dev_cfloat1);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cfloat1, dev_cfloat1, rozmiar * sizeof(float) / 4, hipMemcpyDeviceToHost);
printf("Czas mnozenia GPU (float, 1MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat4, dev_bfloat4, dev_cfloat4);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cfloat4, dev_cfloat4, rozmiar * sizeof(float), hipMemcpyDeviceToHost);
printf("Czas mnozenia GPU (float, 4MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 2 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat8, dev_bfloat8, dev_cfloat8);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cfloat8, dev_cfloat8, rozmiar * sizeof(float) * 2, hipMemcpyDeviceToHost);
printf("Czas mnozenia GPU (float, 8MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 4 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat16, dev_bfloat16, dev_cfloat16);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cfloat16, dev_cfloat16, rozmiar * sizeof(float) * 4, hipMemcpyDeviceToHost);
printf("Czas mnozenia GPU (float, 16MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 8 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble1, dev_bdouble1, dev_cdouble1);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cdouble1, dev_cdouble1, rozmiar * sizeof(double) / 8, hipMemcpyDeviceToHost);
printf("Czas mnozenia GPU (double, 1MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 2 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble4, dev_bdouble4, dev_cdouble4);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cdouble4, dev_cdouble4, rozmiar * sizeof(double) / 2, hipMemcpyDeviceToHost);
printf("Czas mnozenia GPU (double, 4MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble8, dev_bdouble8, dev_cdouble8);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cdouble8, dev_cdouble8, rozmiar * sizeof(double), hipMemcpyDeviceToHost);
printf("Czas mnozenia GPU (double, 8MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 2 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble16, dev_bdouble16, dev_cdouble16);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cdouble16, dev_cdouble16, rozmiar * sizeof(double) * 2, hipMemcpyDeviceToHost);
printf("Czas mnozenia GPU (double, 16MiB) [ms]: %f\n\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 4 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_aint1, dev_bint1, dev_cint1);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cint1, dev_cint1, rozmiar * sizeof(int) / 4, hipMemcpyDeviceToHost);
printf("Czas potegowania GPU (int, 1MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_aint4, dev_bint4, dev_cint4);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cint4, dev_cint4, rozmiar * sizeof(int), hipMemcpyDeviceToHost);
printf("Czas potegowania GPU (int, 4MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 2 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_aint8, dev_bint8, dev_cint8);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cint8, dev_cint8, rozmiar * sizeof(int) * 2, hipMemcpyDeviceToHost);
printf("Czas potegowania GPU (int, 8MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 4 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_aint16, dev_bint16, dev_cint16);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cint16, dev_cint16, rozmiar * sizeof(int) * 4, hipMemcpyDeviceToHost);
printf("Czas potegowania GPU (int, 16MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 4 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat1, dev_bfloat1, dev_cfloat1);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cfloat1, dev_cfloat1, rozmiar * sizeof(float) / 4, hipMemcpyDeviceToHost);
printf("Czas potegowania GPU (float, 1MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat4, dev_bfloat4, dev_cfloat4);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cfloat4, dev_cfloat4, rozmiar * sizeof(float), hipMemcpyDeviceToHost);
printf("Czas potegowania GPU (float, 4MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 2 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat8, dev_bfloat8, dev_cfloat8);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cfloat8, dev_cfloat8, rozmiar * sizeof(float) * 2, hipMemcpyDeviceToHost);
printf("Czas potegowania GPU (float, 8MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 4 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat16, dev_bfloat16, dev_cfloat16);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cfloat16, dev_cfloat16, rozmiar * sizeof(float) * 4, hipMemcpyDeviceToHost);
printf("Czas potegowania GPU (float, 16MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 8 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble1, dev_bdouble1, dev_cdouble1);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cdouble1, dev_cdouble1, rozmiar * sizeof(double) / 8, hipMemcpyDeviceToHost);
printf("Czas potegowania GPU (double, 1MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 2 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble4, dev_bdouble4, dev_cdouble4);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cdouble4, dev_cdouble4, rozmiar * sizeof(double) / 2, hipMemcpyDeviceToHost);
printf("Czas potegowania GPU (double, 4MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble8, dev_bdouble8, dev_cdouble8);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cdouble8, dev_cdouble8, rozmiar * sizeof(double), hipMemcpyDeviceToHost);
printf("Czas potegowania GPU (double, 8MiB) [ms]: %f\n", czasGPU);
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 2 + rozmiarBloku - 1) / rozmiarBloku;
hipEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble16, dev_bdouble16, dev_cdouble16);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
hipMemcpy(cdouble16, dev_cdouble16, rozmiar * sizeof(double) * 2, hipMemcpyDeviceToHost);
printf("Czas potegowaniaGPU (double, 16MiB) [ms]: %f\n\n", czasGPU);
startCPU = omp_get_wtime();
dodawanieCPU(aint1, bint1, cint1, rozmiar / 4);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (int, 1MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(aint4, bint4, cint4, rozmiar);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (int, 4MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(aint8, bint8, cint8, rozmiar * 2);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (int, 8MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(aint16, bint16, cint16, rozmiar * 4);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (int, 16MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(afloat1, bfloat1, cfloat1, rozmiar / 4);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (float, 1MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(afloat4, bfloat4, cfloat4, rozmiar);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (float, 4MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(afloat8, bfloat8, cfloat8, rozmiar * 2);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (float, 8MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(afloat16, bfloat16, cfloat16, rozmiar * 4);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (float, 16MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(adouble1, bdouble1, cdouble1, rozmiar / 8);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (double, 1MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(adouble4, bdouble4, cdouble4, rozmiar / 2);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (double, 4MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(adouble8, bdouble8, cdouble8, rozmiar);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (double, 8MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(adouble16, bdouble16, cdouble16, rozmiar * 2);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (double, 16MiB) [ms]: %f\n\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(aint1, bint1, cint1, rozmiar / 4);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (int, 1MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(aint4, bint4, cint4, rozmiar);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (int, 4MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(aint8, bint8, cint8, rozmiar * 2);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (int, 8MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(aint16, bint16, cint16, rozmiar * 4);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (int, 16MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(afloat1, bfloat1, cfloat1, rozmiar / 4);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (float, 1MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(afloat4, bfloat4, cfloat4, rozmiar);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (float, 4MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(afloat8, bfloat8, cfloat8, rozmiar * 2);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (float, 8MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(afloat16, bfloat16, cfloat16, rozmiar * 4);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (float, 16MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(adouble1, bdouble1, cdouble1, rozmiar / 8);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (double, 1MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(adouble4, bdouble4, cdouble4, rozmiar / 2);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (double, 4MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(adouble8, bdouble8, cdouble8, rozmiar);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (double, 8MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(adouble16, bdouble16, cdouble16, rozmiar * 2);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (double, 16MiB) [ms]: %f\n\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(aint1, bint1, cint1, rozmiar / 4);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (int, 1MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(aint4, bint4, cint4, rozmiar);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (int, 4MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(aint8, bint8, cint8, rozmiar * 2);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (int, 8MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(aint16, bint16, cint16, rozmiar * 4);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (int, 16MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(afloat1, bfloat1, cfloat1, rozmiar / 4);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (float, 1MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(afloat4, bfloat4, cfloat4, rozmiar);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (float, 4MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(afloat8, bfloat8, cfloat8, rozmiar * 2);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (float, 8MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(afloat16, bfloat16, cfloat16, rozmiar * 4);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (float, 16MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(adouble1, bdouble1, cdouble1, rozmiar / 8);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (double, 1MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(adouble4, bdouble4, cdouble4, rozmiar / 2);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (double, 4MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(adouble8, bdouble8, cdouble8, rozmiar);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (double, 8MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(adouble16, bdouble16, cdouble16, rozmiar * 2);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (double, 16MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
delete[] aint1;
delete[] aint4;
delete[] aint8;
delete[] aint16;
delete[] bint1;
delete[] bint4;
delete[] bint8;
delete[] bint16;
delete[] cint1;
delete[] cint4;
delete[] cint8;
delete[] cint16;
delete[] afloat1;
delete[] afloat4;
delete[] afloat8;
delete[] afloat16;
delete[] bfloat1;
delete[] bfloat4;
delete[] bfloat8;
delete[] bfloat16;
delete[] cfloat1;
delete[] cfloat4;
delete[] cfloat8;
delete[] cfloat16;
delete[] adouble1;
delete[] adouble4;
delete[] adouble8;
delete[] adouble16;
delete[] bdouble1;
delete[] bdouble4;
delete[] bdouble8;
delete[] bdouble16;
delete[] cdouble1;
delete[] cdouble4;
delete[] cdouble8;
delete[] cdouble16;
hipFree(dev_aint1);
hipFree(dev_aint4);
hipFree(dev_aint8);
hipFree(dev_aint16);
hipFree(dev_bint1);
hipFree(dev_bint4);
hipFree(dev_bint8);
hipFree(dev_bint16);
hipFree(dev_cint1);
hipFree(dev_cint4);
hipFree(dev_cint8);
hipFree(dev_cint16);
hipFree(dev_afloat1);
hipFree(dev_afloat4);
hipFree(dev_afloat8);
hipFree(dev_afloat16);
hipFree(dev_bfloat1);
hipFree(dev_bfloat4);
hipFree(dev_bfloat8);
hipFree(dev_bfloat16);
hipFree(dev_cfloat1);
hipFree(dev_cfloat4);
hipFree(dev_cfloat8);
hipFree(dev_cfloat16);
hipFree(dev_adouble1);
hipFree(dev_adouble4);
hipFree(dev_adouble8);
hipFree(dev_adouble16);
hipFree(dev_bdouble1);
hipFree(dev_bdouble4);
hipFree(dev_bdouble8);
hipFree(dev_bdouble16);
hipFree(dev_cdouble1);
hipFree(dev_cdouble4);
hipFree(dev_cdouble8);
hipFree(dev_cdouble16);
hipDeviceReset();
return 0;
}
void dodawanieCPU(int *a, int *b, int *c, int rozmiar)
{
for (int i = 0; i < rozmiar; i++)
{
c[i] = a[i] + b[i];
}
}
void mnozenieCPU(int *a, int *b, int *c, int rozmiar)
{
for (int i = 0; i < rozmiar; i++)
{
c[i] = a[i] * b[i];
}
}
void potegowanieCPU(int *a, int *b, int *c, int rozmiar)
{
int wynik;
for (int i = 0; i < rozmiar; i++)
{
wynik = 1;
for (int j = 0; j < b[i]; j++)
{
wynik *= a[i];
}
c[i] = wynik;
}
}
void dodawanieCPU(float *a, float *b, float *c, int rozmiar)
{
for (int i = 0; i < rozmiar; i++)
{
c[i] = a[i] + b[i];
}
}
void mnozenieCPU(float *a, float *b, float *c, int rozmiar)
{
for (int i = 0; i < rozmiar; i++)
{
c[i] = a[i] * b[i];
}
}
void potegowanieCPU(float *a, float *b, float *c, int rozmiar)
{
float wynik;
for (int i = 0; i < rozmiar; i++)
{
wynik = 1;
for (float j = 0; j < b[i]; j++)
{
wynik *= a[i];
}
c[i] = wynik;
}
}
void dodawanieCPU(double *a, double *b, double *c, int rozmiar)
{
for (int i = 0; i < rozmiar; i++)
{
c[i] = a[i] + b[i];
}
}
void mnozenieCPU(double *a, double *b, double *c, int rozmiar)
{
for (int i = 0; i < rozmiar; i++)
{
c[i] = a[i] * b[i];
}
}
void potegowanieCPU(double *a, double *b, double *c, int rozmiar)
{
double wynik;
for (int i = 0; i < rozmiar; i++)
{
wynik = 1;
for (double j = 0; j < b[i]; j++)
{
wynik *= a[i];
}
c[i] = wynik;
}
}
#endif
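// ---------------------------------------------------------------------------
// Zad4: square-matrix addition and naive multiplication (one output element per
// thread) for float and double, launched with 2D grids and blocks and compared
// against sequential CPU implementations. The matrix side length is derived from
// the requested element count via sqrt().
// ---------------------------------------------------------------------------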
#ifdef Zad4
__global__ void kernelDodawanieMacierzy(float *a, float *b, float *c, int rozmiar)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (i < rozmiar && j < rozmiar)
{
c[i * rozmiar + j] = a[i * rozmiar + j] + b[i * rozmiar + j];
}
}
__global__ void kernelMnozenieMacierzy(float *a, float *b, float *c, int rozmiar)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
float wynik = 0;
if (i < rozmiar && j < rozmiar)
{
for (int k = 0; k < rozmiar; k++)
{
wynik += a[i * rozmiar + k] * b[k * rozmiar + j];
}
c[i * rozmiar + j] = wynik;
}
}
__global__ void kernelDodawanieMacierzy(double *a, double *b, double *c, int rozmiar)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (i < rozmiar && j < rozmiar)
{
c[i * rozmiar + j] = a[i * rozmiar + j] + b[i * rozmiar + j];
}
}
__global__ void kernelMnozenieMacierzy(double *a, double *b, double *c, int rozmiar)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
double wynik = 0;
if (i < rozmiar && j < rozmiar)
{
for (int k = 0; k < rozmiar; k++)
{
wynik += a[i * rozmiar + k] * b[k * rozmiar + j];
}
c[i * rozmiar + j] = wynik;
}
}
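// The multiplication kernels above read every operand element straight from global
// memory, O(n) times per output element. A common optimisation is a tiled multiply
// that stages sub-blocks of both matrices in shared memory. The kernel below is only
// an illustrative sketch (it is not launched by the benchmark in this file) and
// assumes the matrix side is a multiple of the tile width and that the grid is
// exactly (rozmiar/32, rozmiar/32) blocks of 32x32 threads.
#define ROZMIAR_KAFELKA 32
__global__ void kernelMnozenieMacierzyKafelkowe(float *a, float *b, float *c, int rozmiar)
{
__shared__ float kafelekA[ROZMIAR_KAFELKA][ROZMIAR_KAFELKA];
__shared__ float kafelekB[ROZMIAR_KAFELKA][ROZMIAR_KAFELKA];
int wiersz = blockIdx.y * ROZMIAR_KAFELKA + threadIdx.y;
int kolumna = blockIdx.x * ROZMIAR_KAFELKA + threadIdx.x;
float wynik = 0;
for (int k = 0; k < rozmiar / ROZMIAR_KAFELKA; k++)
{
// each thread loads one element of each input tile into shared memory
kafelekA[threadIdx.y][threadIdx.x] = a[wiersz * rozmiar + k * ROZMIAR_KAFELKA + threadIdx.x];
kafelekB[threadIdx.y][threadIdx.x] = b[(k * ROZMIAR_KAFELKA + threadIdx.y) * rozmiar + kolumna];
__syncthreads();
for (int m = 0; m < ROZMIAR_KAFELKA; m++)
{
wynik += kafelekA[threadIdx.y][m] * kafelekB[m][threadIdx.x];
}
__syncthreads(); // make sure the tile is no longer needed before it is overwritten
}
c[wiersz * rozmiar + kolumna] = wynik;
}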
void dodawaniemacierzyCPU(float *a, float *b, float *c, int rozmiar);
void mnozeniemacierzyCPU(float *a, float *b, float *c, int rozmiar);
void dodawaniemacierzyCPU(double *a, double *b, double *c, int rozmiar);
void mnozeniemacierzyCPU(double *a, double *b, double *c, int rozmiar);
int main()
{
hipSetDevice(0);
double startCPU, stopCPU;
const int rozmiar = 1024;
int liczbaBlokow, rozmiarBloku = ceil(sqrt(rozmiar));
float czasGPU;
hipEvent_t startGPU, stopGPU;
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
float *afloat1 = new float[rozmiar * rozmiar / 4];
float *dev_afloat1;
hipMalloc(&dev_afloat1, rozmiar * rozmiar * sizeof(float) / 4);
hipMemcpy(dev_afloat1, afloat1, rozmiar * rozmiar * sizeof(float) / 4, hipMemcpyHostToDevice);
float *bfloat1 = new float[rozmiar * rozmiar / 4];
float *dev_bfloat1;
hipMalloc(&dev_bfloat1, rozmiar * rozmiar * sizeof(float) / 4);
hipMemcpy(dev_bfloat1, bfloat1, rozmiar * rozmiar * sizeof(float) / 4, hipMemcpyHostToDevice);
float *cfloat1 = new float[rozmiar * rozmiar / 4];
float *dev_cfloat1;
hipMalloc(&dev_cfloat1, rozmiar * rozmiar * sizeof(float) / 4);
hipMemcpy(dev_cfloat1, cfloat1, rozmiar * rozmiar * sizeof(float) / 4, hipMemcpyHostToDevice);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar / 4 + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
hipEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_afloat1, dev_bfloat1, dev_cfloat1, floor(sqrt(rozmiar * rozmiar / 4)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (float, 1) [ms]: %f\n", czasGPU);
hipEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_afloat1, dev_bfloat1, dev_cfloat1, floor(sqrt(rozmiar * rozmiar / 4)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (float, 1) [ms]: %f\n", czasGPU);
startCPU = omp_get_wtime();
dodawaniemacierzyCPU(afloat1, bfloat1, cfloat1, ceil(rozmiar * rozmiar / 4));
stopCPU = omp_get_wtime();
printf("Czas dodawania macierzy CPU (float, 1) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozeniemacierzyCPU(afloat1, bfloat1, cfloat1, ceil(rozmiar * rozmiar / 4));
stopCPU = omp_get_wtime();
printf("Czas mnozenia macierzy CPU (float, 1) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
delete[] afloat1;
hipFree(dev_afloat1);
delete[] bfloat1;
hipFree(dev_bfloat1);
delete[] cfloat1;
hipFree(dev_cfloat1);
float *afloat4 = new float[rozmiar * rozmiar];
float *dev_afloat4;
hipMalloc(&dev_afloat4, rozmiar * rozmiar * sizeof(float));
hipMemcpy(dev_afloat4, afloat4, rozmiar * rozmiar * sizeof(float), hipMemcpyHostToDevice);
float *bfloat4 = new float[rozmiar * rozmiar];
float *dev_bfloat4;
hipMalloc(&dev_bfloat4, rozmiar * rozmiar * sizeof(float));
hipMemcpy(dev_bfloat4, bfloat4, rozmiar * rozmiar * sizeof(float), hipMemcpyHostToDevice);
float *cfloat4 = new float[rozmiar * rozmiar];
float *dev_cfloat4;
hipMalloc(&dev_cfloat4, rozmiar * rozmiar * sizeof(float));
hipMemcpy(dev_cfloat4, cfloat4, rozmiar * rozmiar * sizeof(float), hipMemcpyHostToDevice);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
hipEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_afloat4, dev_bfloat4, dev_cfloat4, floor(sqrt(rozmiar * rozmiar)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (float, 4) [ms]: %f\n", czasGPU);
hipEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_afloat4, dev_bfloat4, dev_cfloat4, floor(sqrt(rozmiar * rozmiar)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (float, 4) [ms]: %f\n", czasGPU);
startCPU = omp_get_wtime();
dodawaniemacierzyCPU(afloat4, bfloat4, cfloat4, ceil(rozmiar * rozmiar));
stopCPU = omp_get_wtime();
printf("Czas dodawania macierzy CPU (float, 4) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozeniemacierzyCPU(afloat4, bfloat4, cfloat4, ceil(rozmiar * rozmiar));
stopCPU = omp_get_wtime();
printf("Czas mnozenia macierzy CPU (float, 4) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
delete[] afloat4;
hipFree(dev_afloat4);
delete[] bfloat4;
hipFree(dev_bfloat4);
delete[] cfloat4;
hipFree(dev_cfloat4);
float *afloat8 = new float[rozmiar * rozmiar * 2];
float *dev_afloat8;
hipMalloc(&dev_afloat8, rozmiar * rozmiar * sizeof(float) * 2);
hipMemcpy(dev_afloat8, afloat8, rozmiar * rozmiar * sizeof(float) * 2, hipMemcpyHostToDevice);
float *bfloat8 = new float[rozmiar * rozmiar * 2];
float *dev_bfloat8;
hipMalloc(&dev_bfloat8, rozmiar * rozmiar * sizeof(float) * 2);
hipMemcpy(dev_bfloat8, bfloat8, rozmiar * rozmiar * sizeof(float) * 2, hipMemcpyHostToDevice);
float *cfloat8 = new float[rozmiar * rozmiar * 2];
float *dev_cfloat8;
hipMalloc(&dev_cfloat8, rozmiar * rozmiar * sizeof(float) * 2);
hipMemcpy(dev_cfloat8, cfloat8, rozmiar * rozmiar * sizeof(float) * 2, hipMemcpyHostToDevice);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar * 2 + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
hipEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_afloat8, dev_bfloat8, dev_cfloat8, floor(sqrt(rozmiar * rozmiar * 2)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (float, 8) [ms]: %f\n", czasGPU);
hipEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_afloat8, dev_bfloat8, dev_cfloat8, floor(sqrt(rozmiar * rozmiar * 2)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (float, 8) [ms]: %f\n", czasGPU);
startCPU = omp_get_wtime();
dodawaniemacierzyCPU(afloat8, bfloat8, cfloat8, ceil(rozmiar * rozmiar * 2));
stopCPU = omp_get_wtime();
printf("Czas dodawania macierzy CPU (float, 8) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozeniemacierzyCPU(afloat8, bfloat8, cfloat8, ceil(rozmiar * rozmiar * 2));
stopCPU = omp_get_wtime();
printf("Czas mnozenia macierzy CPU (float, 8) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
delete[] afloat8;
hipFree(dev_afloat8);
delete[] bfloat8;
hipFree(dev_bfloat8);
delete[] cfloat8;
hipFree(dev_cfloat8);
float *afloat16 = new float[rozmiar * rozmiar * 4];
float *dev_afloat16;
hipMalloc(&dev_afloat16, rozmiar * rozmiar * sizeof(float) * 4);
hipMemcpy(dev_afloat16, afloat16, rozmiar * rozmiar * sizeof(float) * 4, hipMemcpyHostToDevice);
float *bfloat16 = new float[rozmiar * rozmiar * 4];
float *dev_bfloat16;
hipMalloc(&dev_bfloat16, rozmiar * rozmiar * sizeof(float) * 4);
hipMemcpy(dev_bfloat16, bfloat16, rozmiar * rozmiar * sizeof(float) * 4, hipMemcpyHostToDevice);
float *cfloat16 = new float[rozmiar * rozmiar * 4];
float *dev_cfloat16;
hipMalloc(&dev_cfloat16, rozmiar * rozmiar * sizeof(float) * 4);
hipMemcpy(dev_cfloat16, cfloat16, rozmiar * rozmiar * sizeof(float) * 4, hipMemcpyHostToDevice);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar * 4 + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
hipEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_afloat16, dev_bfloat16, dev_cfloat16, floor(sqrt(rozmiar * rozmiar * 4)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (float, 16) [ms]: %f\n", czasGPU);
hipEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_afloat16, dev_bfloat16, dev_cfloat16, floor(sqrt(rozmiar * rozmiar * 4)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (float, 16) [ms]: %f\n", czasGPU);
startCPU = omp_get_wtime();
dodawaniemacierzyCPU(afloat16, bfloat16, cfloat16, ceil(rozmiar * rozmiar * 4));
stopCPU = omp_get_wtime();
printf("Czas dodawania macierzy CPU (float, 16) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozeniemacierzyCPU(afloat16, bfloat16, cfloat16, ceil(rozmiar * rozmiar * 4));
stopCPU = omp_get_wtime();
printf("Czas mnozenia macierzy CPU (float, 16) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
delete[] afloat16;
hipFree(dev_afloat16);
delete[] bfloat16;
hipFree(dev_bfloat16);
delete[] cfloat16;
hipFree(dev_cfloat16);
double *adouble1 = new double[rozmiar * rozmiar / 8];
double *dev_adouble1;
hipMalloc(&dev_adouble1, rozmiar * rozmiar * sizeof(double) / 8);
hipMemcpy(dev_adouble1, adouble1, rozmiar * rozmiar * sizeof(double) / 8, hipMemcpyHostToDevice);
double *bdouble1 = new double[rozmiar * rozmiar / 8];
double *dev_bdouble1;
hipMalloc(&dev_bdouble1, rozmiar * rozmiar * sizeof(double) / 8);
hipMemcpy(dev_bdouble1, bdouble1, rozmiar * rozmiar * sizeof(double) / 8, hipMemcpyHostToDevice);
double *cdouble1 = new double[rozmiar * rozmiar / 8];
double *dev_cdouble1;
hipMalloc(&dev_cdouble1, rozmiar * rozmiar * sizeof(double) / 8);
hipMemcpy(dev_cdouble1, cdouble1, rozmiar * rozmiar * sizeof(double) / 8, hipMemcpyHostToDevice);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar / 8 + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
hipEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_adouble1, dev_bdouble1, dev_cdouble1, floor(sqrt(rozmiar * rozmiar / 8)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (double, 1) [ms]: %f\n", czasGPU);
hipEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_adouble1, dev_bdouble1, dev_cdouble1, floor(sqrt(rozmiar * rozmiar / 8)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (double, 1) [ms]: %f\n", czasGPU);
startCPU = omp_get_wtime();
dodawaniemacierzyCPU(adouble1, bdouble1, cdouble1, ceil(rozmiar * rozmiar / 8));
stopCPU = omp_get_wtime();
printf("Czas dodawania macierzy CPU (double, 1) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozeniemacierzyCPU(adouble1, bdouble1, cdouble1, ceil(rozmiar * rozmiar / 8));
stopCPU = omp_get_wtime();
printf("Czas mnozenia macierzy CPU (double, 1) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
delete[] adouble1;
hipFree(dev_adouble1);
delete[] bdouble1;
hipFree(dev_bdouble1);
delete[] cdouble1;
hipFree(dev_cdouble1);
double *adouble4 = new double[rozmiar * rozmiar / 2];
double *dev_adouble4;
hipMalloc(&dev_adouble4, rozmiar * rozmiar * sizeof(double) / 2);
hipMemcpy(dev_adouble4, adouble4, rozmiar * rozmiar * sizeof(double) / 2, hipMemcpyHostToDevice);
double *bdouble4 = new double[rozmiar * rozmiar / 2];
double *dev_bdouble4;
hipMalloc(&dev_bdouble4, rozmiar * rozmiar * sizeof(double) / 2);
hipMemcpy(dev_bdouble4, bdouble4, rozmiar * rozmiar * sizeof(double) / 2, hipMemcpyHostToDevice);
double *cdouble4 = new double[rozmiar * rozmiar / 2];
double *dev_cdouble4;
hipMalloc(&dev_cdouble4, rozmiar * rozmiar * sizeof(double) / 2);
hipMemcpy(dev_cdouble4, cdouble4, rozmiar * rozmiar * sizeof(double) / 2, hipMemcpyHostToDevice);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar / 2 + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
hipEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_adouble4, dev_bdouble4, dev_cdouble4, floor(sqrt(rozmiar * rozmiar / 2)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (double, 4) [ms]: %f\n", czasGPU);
hipEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_adouble4, dev_bdouble4, dev_cdouble4, floor(sqrt(rozmiar * rozmiar / 2)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (double, 4) [ms]: %f\n", czasGPU);
startCPU = omp_get_wtime();
dodawaniemacierzyCPU(adouble4, bdouble4, cdouble4, ceil(rozmiar * rozmiar / 2));
stopCPU = omp_get_wtime();
printf("Czas dodawania macierzy CPU (double, 4) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozeniemacierzyCPU(adouble4, bdouble4, cdouble4, ceil(rozmiar * rozmiar / 2));
stopCPU = omp_get_wtime();
printf("Czas mnozenia macierzy CPU (double, 4) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
delete[] adouble4;
hipFree(dev_adouble4);
delete[] bdouble4;
hipFree(dev_bdouble4);
delete[] cdouble4;
hipFree(dev_cdouble4);
double *adouble8 = new double[rozmiar * rozmiar];
double *dev_adouble8;
hipMalloc(&dev_adouble8, rozmiar * rozmiar * sizeof(double));
hipMemcpy(dev_adouble8, adouble8, rozmiar * rozmiar * sizeof(double), hipMemcpyHostToDevice);
double *bdouble8 = new double[rozmiar * rozmiar];
double *dev_bdouble8;
hipMalloc(&dev_bdouble8, rozmiar * rozmiar * sizeof(double));
hipMemcpy(dev_bdouble8, bdouble8, rozmiar * rozmiar * sizeof(double), hipMemcpyHostToDevice);
double *cdouble8 = new double[rozmiar * rozmiar];
double *dev_cdouble8;
hipMalloc(&dev_cdouble8, rozmiar * rozmiar * sizeof(double));
hipMemcpy(dev_cdouble8, cdouble8, rozmiar * rozmiar * sizeof(double), hipMemcpyHostToDevice);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
hipEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_adouble8, dev_bdouble8, dev_cdouble8, floor(sqrt(rozmiar * rozmiar)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (double, 8) [ms]: %f\n", czasGPU);
hipEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_adouble8, dev_bdouble8, dev_cdouble8, floor(sqrt(rozmiar * rozmiar)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (double, 8) [ms]: %f\n", czasGPU);
startCPU = omp_get_wtime();
dodawaniemacierzyCPU(adouble8, bdouble8, cdouble8, ceil(rozmiar * rozmiar));
stopCPU = omp_get_wtime();
printf("Czas dodawania macierzy CPU (double, 8) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozeniemacierzyCPU(adouble8, bdouble8, cdouble8, ceil(rozmiar * rozmiar));
stopCPU = omp_get_wtime();
printf("Czas mnozenia macierzy CPU (double, 8) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
delete[] adouble8;
hipFree(dev_adouble8);
delete[] bdouble8;
hipFree(dev_bdouble8);
delete[] cdouble8;
hipFree(dev_cdouble8);
double *adouble16 = new double[rozmiar * rozmiar * 2];
double *dev_adouble16;
hipMalloc(&dev_adouble16, rozmiar * rozmiar * sizeof(double) * 2);
hipMemcpy(dev_adouble16, adouble16, rozmiar * rozmiar * sizeof(double) * 2, hipMemcpyHostToDevice);
double *bdouble16 = new double[rozmiar * rozmiar * 2];
double *dev_bdouble16;
hipMalloc(&dev_bdouble16, rozmiar * rozmiar * sizeof(double) * 2);
hipMemcpy(dev_bdouble16, bdouble16, rozmiar * rozmiar * sizeof(double) * 2, hipMemcpyHostToDevice);
double *cdouble16 = new double[rozmiar * rozmiar * 2];
double *dev_cdouble16;
hipMalloc(&dev_cdouble16, rozmiar * rozmiar * sizeof(double) * 2);
hipMemcpy(dev_cdouble16, cdouble16, rozmiar * rozmiar * sizeof(double) * 2, hipMemcpyHostToDevice);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar * 2 + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
hipEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_adouble16, dev_bdouble16, dev_cdouble16, floor(sqrt(rozmiar * rozmiar * 2)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (double, 16) [ms]: %f\n", czasGPU);
hipEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_adouble16, dev_bdouble16, dev_cdouble16, floor(sqrt(rozmiar * rozmiar * 2)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (double, 16) [ms]: %f\n", czasGPU);
startCPU = omp_get_wtime();
dodawaniemacierzyCPU(adouble16, bdouble16, cdouble16, ceil(rozmiar * rozmiar * 2));
stopCPU = omp_get_wtime();
printf("Czas dodawania macierzy CPU (double, 16) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozeniemacierzyCPU(adouble16, bdouble16, cdouble16, ceil(rozmiar * rozmiar * 2));
stopCPU = omp_get_wtime();
printf("Czas mnozenia macierzy CPU (double, 16) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
delete[] adouble16;
hipFree(dev_adouble16);
delete[] bdouble16;
hipFree(dev_bdouble16);
delete[] cdouble16;
hipFree(dev_cdouble16);
hipDeviceReset();
return 0;
}
void dodawaniemacierzyCPU(float *a, float *b, float *c, int rozmiar)
{
int size = floor(sqrt(rozmiar)); // matrix side length, same dimension the GPU kernels receive
for (int i = 0; i < size; i++)
{
for (int j = 0; j < size; j++)
{
c[i * size + j] = a[i * size + j] + b[i * size + j];
}
}
}
void mnozeniemacierzyCPU(float *a, float *b, float *c, int rozmiar)
{
int size = floor(sqrt(rozmiar)); // matrix side length, same dimension the GPU kernels receive
for (int i = 0; i < size; i++)
{
for (int j = 0; j < size; j++)
{
c[i * size + j] = 0;
for (int k = 0; k < size; k++)
{
c[i * size + j] += a[i * size + k] * b[k * size + j];
}
}
}
}
void dodawaniemacierzyCPU(double *a, double *b, double *c, int rozmiar)
{
int size = floor(sqrt(rozmiar)); // matrix side length, same dimension the GPU kernels receive
for (int i = 0; i < size; i++)
{
for (int j = 0; j < size; j++)
{
c[i * size + j] = a[i * size + j] + b[i * size + j];
}
}
}
void mnozeniemacierzyCPU(double *a, double *b, double *c, int rozmiar)
{
int size = floor(sqrt(rozmiar)); // matrix side length, same dimension the GPU kernels receive
for (int i = 0; i < size; i++)
{
for (int j = 0; j < size; j++)
{
c[i * size + j] = 0;
for (int k = 0; k < size; k++)
{
c[i * size + j] += a[i * size + k] * b[k * size + j];
}
}
}
}
#endif
#ifdef Zad5
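// Zad5: matrix addition and multiplication again, but the two input matrices
// are read through texture objects bound to linear device buffers instead of
// plain global-memory pointers; only the output stays a raw device pointer.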
__global__ void kernelDodawanieMacierzy(hipTextureObject_t tex, hipTextureObject_t tex2, float *c, int rozmiar)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (i < rozmiar && j < rozmiar)
{
// fetch the operands through the texture objects only for in-range indices
float a = tex1Dfetch<float>(tex, i * rozmiar + j);
float b = tex1Dfetch<float>(tex2, i * rozmiar + j);
c[i * rozmiar + j] = a + b;
}
}
__global__ void kernelMnozenieMacierzy(hipTextureObject_t tex, hipTextureObject_t tex2, float *c, int rozmiar)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
float wynik = 0;
if (i < rozmiar && j < rozmiar)
{
for (int k = 0; k < rozmiar; k++)
{
wynik += tex1Dfetch<float>(tex, i * rozmiar + k) * tex1Dfetch<float>(tex2, k * rozmiar + j);
}
c[i * rozmiar + j] = wynik;
}
}
int main()
{
hipSetDevice(0);
const int rozmiar = 1024;
int liczbaBlokow, rozmiarBloku = ceil(sqrt(rozmiar));
float czasGPU;
hipEvent_t startGPU, stopGPU;
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
float *afloat1_buffer;
hipMalloc(&afloat1_buffer, rozmiar * rozmiar * sizeof(float) / 4);
float *bfloat1_buffer;
hipMalloc(&bfloat1_buffer, rozmiar * rozmiar * sizeof(float) / 4);
float *cfloat1_buffer;
hipMalloc(&cfloat1_buffer, rozmiar * rozmiar * sizeof(float) / 4);
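// Bind the two input buffers to texture objects: the resource descriptor
// describes the raw linear allocation (32-bit float channel), the texture
// descriptor selects element-type reads, and the kernels sample the data
// with tex1Dfetch.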
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = afloat1_buffer;
resDesc.res.linear.desc.f = hipChannelFormatKindFloat;
resDesc.res.linear.desc.x = 32;
resDesc.res.linear.sizeInBytes = rozmiar * rozmiar * sizeof(float) / 4;
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = hipReadModeElementType;
hipTextureObject_t tex = 0;
hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
hipResourceDesc resDesc2;
memset(&resDesc2, 0, sizeof(resDesc2));
resDesc2.resType = hipResourceTypeLinear;
resDesc2.res.linear.devPtr = bfloat1_buffer;
resDesc2.res.linear.desc.f = hipChannelFormatKindFloat;
resDesc2.res.linear.desc.x = 32;
resDesc2.res.linear.sizeInBytes = rozmiar * rozmiar * sizeof(float) / 4;
hipTextureDesc texDesc2;
memset(&texDesc2, 0, sizeof(texDesc2));
texDesc2.readMode = hipReadModeElementType;
hipTextureObject_t tex2 = 0;
hipCreateTextureObject(&tex2, &resDesc2, &texDesc2, NULL);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar / 4 + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
hipEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (tex, tex2, cfloat1_buffer, floor(sqrt(rozmiar * rozmiar / 4)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (float, 1) [ms]: %f\n", czasGPU);
hipEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (tex, tex2, cfloat1_buffer, floor(sqrt(rozmiar * rozmiar / 4)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (float, 1) [ms]: %f\n", czasGPU);
hipFree(afloat1_buffer);
hipFree(bfloat1_buffer);
hipFree(cfloat1_buffer);
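// 8 MiB variant: the resource descriptors are repointed at the larger buffers
// and the texture objects are recreated (note that the previous texture
// objects are not destroyed before their handles are overwritten).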
float *afloat8_buffer;
hipMalloc(&afloat8_buffer, rozmiar * rozmiar * sizeof(float) * 2);
float *bfloat8_buffer;
hipMalloc(&bfloat8_buffer, rozmiar * rozmiar * sizeof(float) * 2);
float *cfloat8_buffer;
hipMalloc(&cfloat8_buffer, rozmiar * rozmiar * sizeof(float) * 2);
resDesc.res.linear.devPtr = afloat8_buffer;
resDesc.res.linear.sizeInBytes = rozmiar * rozmiar * sizeof(float) * 2;
hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
resDesc2.res.linear.devPtr = bfloat8_buffer;
resDesc2.res.linear.sizeInBytes = rozmiar * rozmiar * sizeof(float) * 2;
hipCreateTextureObject(&tex2, &resDesc2, &texDesc2, NULL);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar * 2 + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
hipEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (tex, tex2, cfloat8_buffer, floor(sqrt(rozmiar * rozmiar * 2)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (float, 8) [ms]: %f\n", czasGPU);
hipEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (tex, tex2, cfloat8_buffer, floor(sqrt(rozmiar * rozmiar * 2)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (float, 8) [ms]: %f\n", czasGPU);
hipFree(afloat8_buffer);
hipFree(bfloat8_buffer);
hipFree(cfloat8_buffer);
float *afloat96_buffer;
hipMalloc(&afloat96_buffer, rozmiar * rozmiar * sizeof(float) * 24);
float *bfloat96_buffer;
hipMalloc(&bfloat96_buffer, rozmiar * rozmiar * sizeof(float) * 24);
float *cfloat96_buffer;
hipMalloc(&cfloat96_buffer, rozmiar * rozmiar * sizeof(float) * 24);
resDesc.res.linear.devPtr = afloat96_buffer;
resDesc.res.linear.sizeInBytes = rozmiar * rozmiar * sizeof(float) * 24;
hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
resDesc2.res.linear.devPtr = bfloat96_buffer;
resDesc2.res.linear.sizeInBytes = rozmiar * rozmiar * sizeof(float) * 24;
hipCreateTextureObject(&tex2, &resDesc2, &texDesc2, NULL);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar * 24 + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
hipEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (tex, tex2, cfloat96_buffer, floor(sqrt(rozmiar * rozmiar * 24)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (float, 96) [ms]: %f\n", czasGPU);
hipEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (tex, tex2, cfloat96_buffer, floor(sqrt(rozmiar * rozmiar * 24)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (float, 96) [ms]: %f\n", czasGPU);
hipFree(afloat96_buffer);
hipFree(bfloat96_buffer);
hipFree(cfloat96_buffer);
float *afloat256_buffer;
// 256 MiB of float data = 64 * rozmiar * rozmiar elements
hipMalloc(&afloat256_buffer, rozmiar * rozmiar * sizeof(float) * 64);
float *bfloat256_buffer;
hipMalloc(&bfloat256_buffer, rozmiar * rozmiar * sizeof(float) * 64);
float *cfloat256_buffer;
hipMalloc(&cfloat256_buffer, rozmiar * rozmiar * sizeof(float) * 64);
resDesc.res.linear.devPtr = afloat256_buffer;
resDesc.res.linear.sizeInBytes = rozmiar * rozmiar * sizeof(float) * 64;
hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
resDesc2.res.linear.devPtr = bfloat256_buffer;
resDesc2.res.linear.sizeInBytes = rozmiar * rozmiar * sizeof(float) * 64;
hipCreateTextureObject(&tex2, &resDesc2, &texDesc2, NULL);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar * 64 + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
hipEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (tex, tex2, cfloat256_buffer, floor(sqrt(rozmiar * rozmiar * 64)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (float, 256) [ms]: %f\n", czasGPU);
hipEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (tex, tex2, cfloat256_buffer, floor(sqrt(rozmiar * rozmiar * 64)));
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipDeviceSynchronize();
hipEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (float, 256) [ms]: %f\n", czasGPU);
hipFree(afloat256_buffer);
hipFree(bfloat256_buffer);
hipFree(cfloat256_buffer);
hipDeviceReset();
return 0;
}
#endif
| 988dcbc740f88222a101726f12ce826034d34367.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include <omp.h>
#include <math.h>
#define Zad5
#ifdef Zad1
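// Zad1: query the properties of device 0 and print the clock rate, memory bus
// width, compute capability and total CUDA core count; coresPerSM() maps the
// SM architecture version to the number of cores per multiprocessor.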
int coresPerSM(cudaDeviceProp prop) {
typedef struct {
int SM; // 0xMm (hexidecimal notation), M = SM Major version,
// and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{0x30, 192},
{0x32, 192},
{0x35, 192},
{0x37, 192},
{0x50, 128},
{0x52, 128},
{0x53, 128},
{0x60, 64},
{0x61, 128},
{0x62, 128},
{0x70, 64},
{0x72, 64},
{0x75, 64},
{-1, -1} };
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((prop.major << 4) + prop.minor)) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
// If we don't find the values, we default use the previous one
// to run properly
printf(
"MapSMtoCores for SM %d.%d is undefined."
" Default to use %d Cores/SM\n",
prop.major, prop.minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
int main()
{
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
printf("Nazwa urzadzenia: %s\n", prop.name);
printf("Czestotliwosc zegara [KHz]: %d\n", prop.memoryClockRate);
printf("Przepustowosc pamieci [bity]: %d\n", prop.memoryBusWidth);
printf("Compute Capability: %d\n", coresPerSM(prop));
printf("Liczba multiprocesorow: %d\n", prop.multiProcessorCount);
printf("Liczba rdzeni: %d\n", (coresPerSM(prop)) * prop.multiProcessorCount);
cudaSetDevice(0);
cudaDeviceReset();
return 0;
}
#endif
#ifdef Zad2
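// Zad2: host <-> device transfer benchmark. Buffers of 1, 8, 96 and 256 MiB
// of char, int, float and double data are copied in both directions and each
// transfer is timed with CUDA events.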
int main()
{
cudaSetDevice(0);
char *charmib1, *charmib8, *charmib96, *charmib256, *a1, *a8, *a96, *a256;
int *intmib1, *intmib8, *intmib96, *intmib256, *b1, *b8, *b96, *b256;
float *floatmib1, *floatmib8, *floatmib96, *floatmib256, *c1, *c8, *c96, *c256;
double *doublemib1, *doublemib8, *doublemib96, *doublemib256, *d1, *d8, *d96, *d256;
charmib1 = new char[1024 * 1024];
charmib8 = new char[8 * 1024 * 1024];
charmib96 = new char[96 * 1024 * 1024];
charmib256 = new char[256 * 1024 * 1024];
intmib1 = new int[1024 * 1024 / 4];
intmib8 = new int[2 * 1024 * 1024];
intmib96 = new int[24 * 1024 * 1024];
intmib256 = new int[64 * 1024 * 1024];
floatmib1 = new float[1024 * 1024 / 4];
floatmib8 = new float[2 * 1024 * 1024];
floatmib96 = new float[24 * 1024 * 1024];
floatmib256 = new float[64 * 1024 * 1024];
doublemib1 = new double[1024 * 1024 / 8];
doublemib8 = new double[1024 * 1024];
doublemib96 = new double[12 * 1024 * 1024];
doublemib256 = new double[32 * 1024 * 1024];
cudaMalloc(&a1, 1024 * 1024 * sizeof(char));
cudaMalloc(&a8, 1024 * 1024 * 8 * sizeof(char));
cudaMalloc(&a96, 1024 * 1024 * 96 * sizeof(char));
cudaMalloc(&a256, 1024 * 1024 * 256 * sizeof(char));
cudaMalloc(&b1, 1024 * 1024 * sizeof(int) / 4);
cudaMalloc(&b8, 1024 * 1024 * 2 * sizeof(int));
cudaMalloc(&b96, 1024 * 1024 * 24 * sizeof(int));
cudaMalloc(&b256, 1024 * 1024 * 64 * sizeof(int));
cudaMalloc(&c1, 1024 * 1024 * sizeof(float) / 4);
cudaMalloc(&c8, 1024 * 1024 * 2 * sizeof(float));
cudaMalloc(&c96, 1024 * 1024 * 24 * sizeof(float));
cudaMalloc(&c256, 1024 * 1024 * 64 * sizeof(float));
cudaMalloc(&d1, 1024 * 1024 * sizeof(double) / 8);
cudaMalloc(&d8, 1024 * 1024 * sizeof(double));
cudaMalloc(&d96, 1024 * 1024 * 12 * sizeof(double));
cudaMalloc(&d256, 1024 * 1024 * 32 * sizeof(double));
cudaEvent_t start, stop;
float czas;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaMemcpy(a1, charmib1, 1024 * 1024 * sizeof(char), cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (char, 1 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(a8, charmib8, 1024 * 1024 * 8 *sizeof(char), cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (char, 8 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(a96, charmib96, 1024 * 1024 * 96 *sizeof(char), cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (char, 96 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(a256, charmib256, 1024 * 1024 * 256 * sizeof(char), cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (char, 256 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(b1, intmib1, 1024 * 1024 * sizeof(int) / 4, cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (int, 1 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(b8, intmib8, 1024 * 1024 * 2 * sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (int, 8 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(b96, intmib96, 1024 * 1024 * 24 * sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (int, 96 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(b256, intmib256, 1024 * 1024 * 64 * sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (int, 256 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(c1, floatmib1, 1024 * 1024 * sizeof(float) / 4, cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (float, 1 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(c8, floatmib8, 1024 * 1024 * 2 * sizeof(float), cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (float, 8 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(c96, floatmib96, 1024 * 1024 * 24 * sizeof(float), cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (float, 96 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(c256, floatmib256, 1024 * 1024 * 64 * sizeof(float), cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (float, 256 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(d1, doublemib1, 1024 * 1024 * sizeof(double) / 8, cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (double, 1 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(d8, doublemib8, 1024 * 1024 * sizeof(double), cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (double, 8 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(d96, doublemib96, 1024 * 1024 * 12 * sizeof(double), cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (double, 96 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(d256, doublemib256, 1024 * 1024 * 32 * sizeof(double), cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania HostToDevice (double, 256 MiB) [ms]: %f\n\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(charmib1, a1, 1024 * 1024 * sizeof(char), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (char, 1 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(charmib8, a8, 1024 * 1024 * 8 * sizeof(char), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (char, 8 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(charmib96, a96, 1024 * 1024 * 96 * sizeof(char), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (char, 96 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(charmib256, a256, 1024 * 1024 * 256 * sizeof(char), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (char, 256 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(intmib1, b1, 1024 * 1024 * sizeof(int) / 4, cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (int, 1 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(intmib8, b8, 1024 * 1024 * 2 * sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (int, 8 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(intmib96, b96, 1024 * 1024 * 24 * sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (int, 96 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(intmib256, b256, 1024 * 1024 * 64 * sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (int, 256 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(floatmib1, c1, 1024 * 1024 * sizeof(float) / 4, cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (float, 1 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(floatmib8, c8, 1024 * 1024 * 2 * sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (float, 8 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(floatmib96, c96, 1024 * 1024 * 24 * sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (float, 96 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(floatmib256, c256, 1024 * 1024 * 64 * sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (float, 256 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(doublemib1, d1, 1024 * 1024 * sizeof(double) / 8, cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (double, 1 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(doublemib8, d8, 1024 * 1024 * sizeof(double), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (double, 8 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(doublemib96, d96, 1024 * 1024 * 12 * sizeof(double), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (double, 96 MiB) [ms]: %f\n", czas);
cudaEventRecord(start, 0);
cudaMemcpy(doublemib256, d256, 1024 * 1024 * 32 * sizeof(double), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&czas, start, stop);
printf("Czas przesylania DeviceToHost (double, 256 MiB) [ms]: %f\n", czas);
delete[] charmib1;
delete[] charmib8;
delete[] charmib96;
delete[] charmib256;
delete[] intmib1;
delete[] intmib8;
delete[] intmib96;
delete[] intmib256;
delete[] floatmib1;
delete[] floatmib8;
delete[] floatmib96;
delete[] floatmib256;
delete[] doublemib1;
delete[] doublemib8;
delete[] doublemib96;
delete[] doublemib256;
cudaFree(a1);
cudaFree(a8);
cudaFree(a96);
cudaFree(a256);
cudaFree(b1);
cudaFree(b8);
cudaFree(b96);
cudaFree(b256);
cudaFree(c1);
cudaFree(c8);
cudaFree(c96);
cudaFree(c256);
cudaFree(d1);
cudaFree(d8);
cudaFree(d96);
cudaFree(d256);
cudaDeviceReset();
return 0;
}
#endif
#ifdef Zad3
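// Zad3: element-wise addition, multiplication and exponentiation kernels,
// overloaded for int, float and double. One thread handles one element; the
// exponentiation kernel multiplies the base a[i] by itself b[i] times.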
__global__ void kernelMnozenie(int *a, int *b, int *c)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
c[i] = a[i] * b[i];
}
__global__ void kernelDodawanie(int *a, int *b, int *c)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
c[i] = a[i] + b[i];
}
__global__ void kernelPotegowanie(int *a, int *b, int *c)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int wynik = 1;
for (int j = 0; j < b[i]; j++)
{
wynik *= a[i];
}
c[i] = wynik;
}
__global__ void kernelMnozenie(float *a, float *b, float *c)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
c[i] = a[i] * b[i];
}
__global__ void kernelDodawanie(float *a, float *b, float *c)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
c[i] = a[i] + b[i];
}
__global__ void kernelPotegowanie(float *a, float *b, float *c)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
float wynik = 1;
for (float j = 0; j < b[i]; j++)
{
wynik *= a[i];
}
c[i] = wynik;
}
__global__ void kernelMnozenie(double *a, double *b, double *c)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
c[i] = a[i] * b[i];
}
__global__ void kernelDodawanie(double *a, double *b, double *c)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
c[i] = a[i] + b[i];
}
__global__ void kernelPotegowanie(double *a, double *b, double *c)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
double wynik = 1;
for (double j = 0; j < b[i]; j++)
{
wynik *= a[i];
}
c[i] = wynik;
}
void dodawanieCPU(int *a, int *b, int *c, int rozmiar);
void mnozenieCPU(int *a, int *b, int *c, int rozmiar);
void potegowanieCPU(int *a, int *b, int *c, int rozmiar);
void dodawanieCPU(float *a, float *b, float *c, int rozmiar);
void mnozenieCPU(float *a, float *b, float *c, int rozmiar);
void potegowanieCPU(float *a, float *b, float *c, int rozmiar);
void dodawanieCPU(double *a, double *b, double *c, int rozmiar);
void mnozenieCPU(double *a, double *b, double *c, int rozmiar);
void potegowanieCPU(double *a, double *b, double *c, int rozmiar);
int main()
{
cudaSetDevice(0);
/*const int rozmiar = 9;
int a[rozmiar] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
int b[rozmiar] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
int c[rozmiar] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
int *dev_a;
int *dev_b;
int *dev_c;
cudaMalloc(&dev_a, rozmiar * sizeof(int));
cudaMalloc(&dev_b, rozmiar * sizeof(int));
cudaMalloc(&dev_c, rozmiar * sizeof(int));
cudaMemcpy(dev_a, a, rozmiar * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, rozmiar * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_c, c, rozmiar * sizeof(int), cudaMemcpyHostToDevice);
kernelDodawanie << <1, rozmiar >> > (dev_a, dev_b, dev_c);
cudaDeviceSynchronize();
cudaMemcpy(c, dev_c, rozmiar * sizeof(int), cudaMemcpyDeviceToHost);
printf("\nDodawanie GPU\n");
for (int i = 0; i < rozmiar; i++)
{
printf("%d + %d = %d\n", a[i], b[i], c[i]);
}
kernelMnozenie << <1, rozmiar >> > (dev_a, dev_b, dev_c);
cudaMemcpy(c, dev_c, rozmiar * sizeof(int), cudaMemcpyDeviceToHost);
printf("\nMnozenie GPU\n");
for (int i = 0; i < rozmiar; i++)
{
printf("%d * %d = %d\n", a[i], b[i], c[i]);
}
kernelPotegowanie << <1, rozmiar >> > (dev_a, dev_b, dev_c);
cudaMemcpy(c, dev_c, rozmiar * sizeof(int), cudaMemcpyDeviceToHost);
printf("\nPotegowanie GPU\n");
for (int i = 0; i < rozmiar; i++)
{
printf("%d ^ %d = %d\n", a[i], b[i], c[i]);
}
dodawanieCPU(a, b, c, rozmiar);
printf("\nDodawanie CPU\n");
for (int i = 0; i < rozmiar; i++)
{
printf("%d + %d = %d\n", a[i], b[i], c[i]);
}
mnozenieCPU(a, b, c, rozmiar);
printf("\nMnozenie CPU\n");
for (int i = 0; i < rozmiar; i++)
{
printf("%d * %d = %d\n", a[i], b[i], c[i]);
}
potegowanieCPU(a, b, c, rozmiar);
printf("\nPotegowanie CPU\n");
for (int i = 0; i < rozmiar; i++)
{
printf("%d ^ %d = %d\n", a[i], b[i], c[i]);
}
*/
const int rozmiar = 1024 * 1024;
int liczbaBlokow;
int rozmiarBloku = 1024;
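// Host buffers for every type/size combination; the numeric suffix is the
// buffer size in MiB, so the element count is scaled by sizeof(type)
// (e.g. aint1 holds 1 MiB of ints, adouble16 holds 16 MiB of doubles).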
int *aint1 = new int[rozmiar / 4];
int *bint1 = new int[rozmiar / 4];
int *cint1 = new int[rozmiar / 4];
int *aint4 = new int[rozmiar];
int *bint4 = new int[rozmiar];
int *cint4 = new int[rozmiar];
int *aint8 = new int[rozmiar * 2];
int *bint8 = new int[rozmiar * 2];
int *cint8 = new int[rozmiar * 2];
int *aint16 = new int[rozmiar * 4];
int *bint16 = new int[rozmiar * 4];
int *cint16 = new int[rozmiar * 4];
float *afloat1 = new float[rozmiar / 4];
float *bfloat1 = new float[rozmiar / 4];
float *cfloat1 = new float[rozmiar / 4];
float *afloat4 = new float[rozmiar];
float *bfloat4 = new float[rozmiar];
float *cfloat4 = new float[rozmiar];
float *afloat8 = new float[rozmiar * 2];
float *bfloat8 = new float[rozmiar * 2];
float *cfloat8 = new float[rozmiar * 2];
float *afloat16 = new float[rozmiar * 4];
float *bfloat16 = new float[rozmiar * 4];
float *cfloat16 = new float[rozmiar * 4];
double *adouble1 = new double[rozmiar / 8];
double *bdouble1 = new double[rozmiar / 8];
double *cdouble1 = new double[rozmiar / 8];
double *adouble4 = new double[rozmiar / 2];
double *bdouble4 = new double[rozmiar / 2];
double *cdouble4 = new double[rozmiar / 2];
double *adouble8 = new double[rozmiar];
double *bdouble8 = new double[rozmiar];
double *cdouble8 = new double[rozmiar];
double *adouble16 = new double[rozmiar * 2];
double *bdouble16 = new double[rozmiar * 2];
double *cdouble16 = new double[rozmiar * 2];
int *dev_aint1;
int *dev_aint4;
int *dev_aint8;
int *dev_aint16;
int *dev_bint1;
int *dev_bint4;
int *dev_bint8;
int *dev_bint16;
int *dev_cint1;
int *dev_cint4;
int *dev_cint8;
int *dev_cint16;
float *dev_afloat1;
float *dev_afloat4;
float *dev_afloat8;
float *dev_afloat16;
float *dev_bfloat1;
float *dev_bfloat4;
float *dev_bfloat8;
float *dev_bfloat16;
float *dev_cfloat1;
float *dev_cfloat4;
float *dev_cfloat8;
float *dev_cfloat16;
double *dev_adouble1;
double *dev_adouble4;
double *dev_adouble8;
double *dev_adouble16;
double *dev_bdouble1;
double *dev_bdouble4;
double *dev_bdouble8;
double *dev_bdouble16;
double *dev_cdouble1;
double *dev_cdouble4;
double *dev_cdouble8;
double *dev_cdouble16;
cudaMalloc(&dev_aint1, rozmiar * sizeof(int) / 4);
cudaMalloc(&dev_aint4, rozmiar * sizeof(int));
cudaMalloc(&dev_aint8, rozmiar * sizeof(int) * 2);
cudaMalloc(&dev_aint16, rozmiar * sizeof(int) * 4);
cudaMalloc(&dev_bint1, rozmiar * sizeof(int) / 4);
cudaMalloc(&dev_bint4, rozmiar * sizeof(int));
cudaMalloc(&dev_bint8, rozmiar * sizeof(int) * 2);
cudaMalloc(&dev_bint16, rozmiar * sizeof(int) * 4);
cudaMalloc(&dev_cint1, rozmiar * sizeof(int) / 4);
cudaMalloc(&dev_cint4, rozmiar * sizeof(int));
cudaMalloc(&dev_cint8, rozmiar * sizeof(int) * 2);
cudaMalloc(&dev_cint16, rozmiar * sizeof(int) * 4);
cudaMalloc(&dev_afloat1, rozmiar * sizeof(float) / 4);
cudaMalloc(&dev_afloat4, rozmiar * sizeof(float));
cudaMalloc(&dev_afloat8, rozmiar * sizeof(float) * 2);
cudaMalloc(&dev_afloat16, rozmiar * sizeof(float) * 4);
cudaMalloc(&dev_bfloat1, rozmiar * sizeof(float) / 4);
cudaMalloc(&dev_bfloat4, rozmiar * sizeof(float));
cudaMalloc(&dev_bfloat8, rozmiar * sizeof(float) * 2);
cudaMalloc(&dev_bfloat16, rozmiar * sizeof(float) * 4);
cudaMalloc(&dev_cfloat1, rozmiar * sizeof(float) / 4);
cudaMalloc(&dev_cfloat4, rozmiar * sizeof(float));
cudaMalloc(&dev_cfloat8, rozmiar * sizeof(float) * 2);
cudaMalloc(&dev_cfloat16, rozmiar * sizeof(float) * 4);
cudaMalloc(&dev_adouble1, rozmiar * sizeof(double) / 8);
cudaMalloc(&dev_adouble4, rozmiar * sizeof(double) / 2);
cudaMalloc(&dev_adouble8, rozmiar * sizeof(double));
cudaMalloc(&dev_adouble16, rozmiar * sizeof(double) * 2);
cudaMalloc(&dev_bdouble1, rozmiar * sizeof(double) / 8);
cudaMalloc(&dev_bdouble4, rozmiar * sizeof(double) / 2);
cudaMalloc(&dev_bdouble8, rozmiar * sizeof(double));
cudaMalloc(&dev_bdouble16, rozmiar * sizeof(double) * 2);
cudaMalloc(&dev_cdouble1, rozmiar * sizeof(double) / 8);
cudaMalloc(&dev_cdouble4, rozmiar * sizeof(double) / 2);
cudaMalloc(&dev_cdouble8, rozmiar * sizeof(double));
cudaMalloc(&dev_cdouble16, rozmiar * sizeof(double) * 2);
cudaMemcpy(dev_aint1, aint1, rozmiar * sizeof(int) / 4, cudaMemcpyHostToDevice);
cudaMemcpy(dev_aint4, aint4, rozmiar * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_aint8, aint8, rozmiar * sizeof(int) * 2, cudaMemcpyHostToDevice);
cudaMemcpy(dev_aint16, aint16, rozmiar * sizeof(int) * 4, cudaMemcpyHostToDevice);
cudaMemcpy(dev_bint1, bint1, rozmiar * sizeof(int) / 4, cudaMemcpyHostToDevice);
cudaMemcpy(dev_bint4, bint4, rozmiar * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_bint8, bint8, rozmiar * sizeof(int) * 2, cudaMemcpyHostToDevice);
cudaMemcpy(dev_bint16, bint16, rozmiar * sizeof(int) * 4, cudaMemcpyHostToDevice);
cudaMemcpy(dev_cint1, cint1, rozmiar * sizeof(int) / 4, cudaMemcpyHostToDevice);
cudaMemcpy(dev_cint4, cint4, rozmiar * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_cint8, cint8, rozmiar * sizeof(int) * 2, cudaMemcpyHostToDevice);
cudaMemcpy(dev_cint16, cint16, rozmiar * sizeof(int) * 4, cudaMemcpyHostToDevice);
cudaMemcpy(dev_afloat1, afloat1, rozmiar * sizeof(float) / 4, cudaMemcpyHostToDevice);
cudaMemcpy(dev_afloat4, afloat4, rozmiar * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_afloat8, afloat8, rozmiar * sizeof(float) * 2, cudaMemcpyHostToDevice);
cudaMemcpy(dev_afloat16, afloat16, rozmiar * sizeof(float) * 4, cudaMemcpyHostToDevice);
cudaMemcpy(dev_bfloat1, bfloat1, rozmiar * sizeof(float) / 4, cudaMemcpyHostToDevice);
cudaMemcpy(dev_bfloat4, bfloat4, rozmiar * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_bfloat8, bfloat8, rozmiar * sizeof(float) * 2, cudaMemcpyHostToDevice);
cudaMemcpy(dev_bfloat16, bfloat16, rozmiar * sizeof(float) * 4, cudaMemcpyHostToDevice);
cudaMemcpy(dev_cfloat1, cfloat1, rozmiar * sizeof(float) / 4, cudaMemcpyHostToDevice);
cudaMemcpy(dev_cfloat4, cfloat4, rozmiar * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_cfloat8, cfloat8, rozmiar * sizeof(float) * 2, cudaMemcpyHostToDevice);
cudaMemcpy(dev_cfloat16, cfloat16, rozmiar * sizeof(float) * 4, cudaMemcpyHostToDevice);
cudaMemcpy(dev_adouble1, adouble1, rozmiar * sizeof(double) / 8, cudaMemcpyHostToDevice);
cudaMemcpy(dev_adouble4, adouble4, rozmiar * sizeof(double) / 2, cudaMemcpyHostToDevice);
cudaMemcpy(dev_adouble8, adouble8, rozmiar * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_adouble16, adouble16, rozmiar * sizeof(double) * 2, cudaMemcpyHostToDevice);
cudaMemcpy(dev_bdouble1, bdouble1, rozmiar * sizeof(double) / 8, cudaMemcpyHostToDevice);
cudaMemcpy(dev_bdouble4, bdouble4, rozmiar * sizeof(double) / 2, cudaMemcpyHostToDevice);
cudaMemcpy(dev_bdouble8, bdouble8, rozmiar * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_bdouble16, bdouble16, rozmiar * sizeof(double) * 2, cudaMemcpyHostToDevice);
cudaMemcpy(dev_cdouble1, cdouble1, rozmiar * sizeof(double) / 8, cudaMemcpyHostToDevice);
cudaMemcpy(dev_cdouble4, cdouble4, rozmiar * sizeof(double) / 2, cudaMemcpyHostToDevice);
cudaMemcpy(dev_cdouble8, cdouble8, rozmiar * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_cdouble16, cdouble16, rozmiar * sizeof(double) * 2, cudaMemcpyHostToDevice);
float czasGPU;
cudaEvent_t startGPU, stopGPU;
double startCPU, stopCPU;
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
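// Timing pattern for every GPU case below: grid size = ceil(N / rozmiarBloku),
// events recorded around the kernel launch, result copied back, elapsed time
// printed in milliseconds.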
liczbaBlokow = (rozmiar / 4 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_aint1, dev_bint1, dev_cint1);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cint1, dev_cint1, rozmiar * sizeof(int) / 4, cudaMemcpyDeviceToHost);
printf("Czas dodawania GPU (int, 1MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_aint4, dev_bint4, dev_cint4);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cint4, dev_cint4, rozmiar * sizeof(int), cudaMemcpyDeviceToHost);
printf("Czas dodawania GPU (int, 4MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 2 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_aint8, dev_bint8, dev_cint8);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cint8, dev_cint8, rozmiar * sizeof(int) * 2, cudaMemcpyDeviceToHost);
printf("Czas dodawania GPU (int, 8MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 4 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_aint16, dev_bint16, dev_cint16);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cint16, dev_cint16, rozmiar * sizeof(int) * 4, cudaMemcpyDeviceToHost);
printf("Czas dodawania GPU (int, 16MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 4 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat1, dev_bfloat1, dev_cfloat1);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cfloat1, dev_cfloat1, rozmiar * sizeof(float) / 4, cudaMemcpyDeviceToHost);
printf("Czas dodawania GPU (float, 1MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat4, dev_bfloat4, dev_cfloat4);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cfloat4, dev_cfloat4, rozmiar * sizeof(float), cudaMemcpyDeviceToHost);
printf("Czas dodawania GPU (float, 4MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 2 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat8, dev_bfloat8, dev_cfloat8);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cfloat8, dev_cfloat8, rozmiar * sizeof(float) * 2, cudaMemcpyDeviceToHost);
printf("Czas dodawania GPU (float, 8MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 4 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat16, dev_bfloat16, dev_cfloat16);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cfloat16, dev_cfloat16, rozmiar * sizeof(float) * 4, cudaMemcpyDeviceToHost);
printf("Czas dodawania GPU (float, 16MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 8 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble1, dev_bdouble1, dev_cdouble1);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cdouble1, dev_cdouble1, rozmiar * sizeof(double) / 8, cudaMemcpyDeviceToHost);
printf("Czas dodawania GPU (double, 1MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 2 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble4, dev_bdouble4, dev_cdouble4);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cdouble4, dev_cdouble4, rozmiar * sizeof(double) / 2, cudaMemcpyDeviceToHost);
printf("Czas dodawania GPU (double, 4MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble8, dev_bdouble8, dev_cdouble8);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cdouble8, dev_cdouble8, rozmiar * sizeof(double), cudaMemcpyDeviceToHost);
printf("Czas dodawania GPU (double, 8MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 2 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelDodawanie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble16, dev_bdouble16, dev_cdouble16);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cdouble16, dev_cdouble16, rozmiar * sizeof(double) * 2, cudaMemcpyDeviceToHost);
printf("Czas dodawania GPU (double, 16MiB) [ms]: %f\n\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 4 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_aint1, dev_bint1, dev_cint1);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cint1, dev_cint1, rozmiar * sizeof(int) / 4, cudaMemcpyDeviceToHost);
printf("Czas mnozenia GPU (int, 1MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_aint4, dev_bint4, dev_cint4);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cint4, dev_cint4, rozmiar * sizeof(int), cudaMemcpyDeviceToHost);
printf("Czas mnozenia GPU (int, 4MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 2 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_aint8, dev_bint8, dev_cint8);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cint8, dev_cint8, rozmiar * sizeof(int) * 2, cudaMemcpyDeviceToHost);
printf("Czas mnozenia GPU (int, 8MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 4 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_aint16, dev_bint16, dev_cint16);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cint16, dev_cint16, rozmiar * sizeof(int) * 4, cudaMemcpyDeviceToHost);
printf("Czas mnozenia GPU (int, 16MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 4 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat1, dev_bfloat1, dev_cfloat1);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cfloat1, dev_cfloat1, rozmiar * sizeof(float) / 4, cudaMemcpyDeviceToHost);
printf("Czas mnozenia GPU (float, 1MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat4, dev_bfloat4, dev_cfloat4);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cfloat4, dev_cfloat4, rozmiar * sizeof(float), cudaMemcpyDeviceToHost);
printf("Czas mnozenia GPU (float, 4MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 2 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat8, dev_bfloat8, dev_cfloat8);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cfloat8, dev_cfloat8, rozmiar * sizeof(float) * 2, cudaMemcpyDeviceToHost);
printf("Czas mnozenia GPU (float, 8MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 4 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat16, dev_bfloat16, dev_cfloat16);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cfloat16, dev_cfloat16, rozmiar * sizeof(float) * 4, cudaMemcpyDeviceToHost);
printf("Czas mnozenia GPU (float, 16MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 8 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble1, dev_bdouble1, dev_cdouble1);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cdouble1, dev_cdouble1, rozmiar * sizeof(double) / 8, cudaMemcpyDeviceToHost);
printf("Czas mnozenia GPU (double, 1MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 2 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble4, dev_bdouble4, dev_cdouble4);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cdouble4, dev_cdouble4, rozmiar * sizeof(double) / 2, cudaMemcpyDeviceToHost);
printf("Czas mnozenia GPU (double, 4MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble8, dev_bdouble8, dev_cdouble8);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cdouble8, dev_cdouble8, rozmiar * sizeof(double), cudaMemcpyDeviceToHost);
printf("Czas mnozenia GPU (double, 8MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 2 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelMnozenie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble16, dev_bdouble16, dev_cdouble16);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cdouble16, dev_cdouble16, rozmiar * sizeof(double) * 2, cudaMemcpyDeviceToHost);
printf("Czas mnozenia GPU (double, 16MiB) [ms]: %f\n\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 4 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_aint1, dev_bint1, dev_cint1);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cint1, dev_cint1, rozmiar * sizeof(int) / 4, cudaMemcpyDeviceToHost);
printf("Czas potegowania GPU (int, 1MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_aint4, dev_bint4, dev_cint4);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cint4, dev_cint4, rozmiar * sizeof(int), cudaMemcpyDeviceToHost);
printf("Czas potegowania GPU (int, 4MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 2 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_aint8, dev_bint8, dev_cint8);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cint8, dev_cint8, rozmiar * sizeof(int) * 2, cudaMemcpyDeviceToHost);
printf("Czas potegowania GPU (int, 8MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 4 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_aint16, dev_bint16, dev_cint16);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cint16, dev_cint16, rozmiar * sizeof(int) * 4, cudaMemcpyDeviceToHost);
printf("Czas potegowania GPU (int, 16MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 4 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat1, dev_bfloat1, dev_cfloat1);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cfloat1, dev_cfloat1, rozmiar * sizeof(float) / 4, cudaMemcpyDeviceToHost);
printf("Czas potegowania GPU (float, 1MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat4, dev_bfloat4, dev_cfloat4);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cfloat4, dev_cfloat4, rozmiar * sizeof(float), cudaMemcpyDeviceToHost);
printf("Czas potegowania GPU (float, 4MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 2 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat8, dev_bfloat8, dev_cfloat8);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cfloat8, dev_cfloat8, rozmiar * sizeof(float) * 2, cudaMemcpyDeviceToHost);
printf("Czas potegowania GPU (float, 8MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 4 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_afloat16, dev_bfloat16, dev_cfloat16);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cfloat16, dev_cfloat16, rozmiar * sizeof(float) * 4, cudaMemcpyDeviceToHost);
printf("Czas potegowania GPU (float, 16MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 8 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble1, dev_bdouble1, dev_cdouble1);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cdouble1, dev_cdouble1, rozmiar * sizeof(double) / 8, cudaMemcpyDeviceToHost);
printf("Czas potegowania GPU (double, 1MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar / 2 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble4, dev_bdouble4, dev_cdouble4);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cdouble4, dev_cdouble4, rozmiar * sizeof(double) / 2, cudaMemcpyDeviceToHost);
printf("Czas potegowania GPU (double, 4MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble8, dev_bdouble8, dev_cdouble8);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cdouble8, dev_cdouble8, rozmiar * sizeof(double), cudaMemcpyDeviceToHost);
printf("Czas potegowania GPU (double, 8MiB) [ms]: %f\n", czasGPU);
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
liczbaBlokow = (rozmiar * 2 + rozmiarBloku - 1) / rozmiarBloku;
cudaEventRecord(startGPU, 0);
kernelPotegowanie << <liczbaBlokow, rozmiarBloku >> > (dev_adouble16, dev_bdouble16, dev_cdouble16);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
cudaMemcpy(cdouble16, dev_cdouble16, rozmiar * sizeof(double) * 2, cudaMemcpyDeviceToHost);
printf("Czas potegowaniaGPU (double, 16MiB) [ms]: %f\n\n", czasGPU);
startCPU = omp_get_wtime();
dodawanieCPU(aint1, bint1, cint1, rozmiar / 4);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (int, 1MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(aint4, bint4, cint4, rozmiar);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (int, 4MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(aint8, bint8, cint8, rozmiar * 2);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (int, 8MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(aint16, bint16, cint16, rozmiar * 4);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (int, 16MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(afloat1, bfloat1, cfloat1, rozmiar / 4);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (float, 1MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(afloat4, bfloat4, cfloat4, rozmiar);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (float, 4MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(afloat8, bfloat8, cfloat8, rozmiar * 2);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (float, 8MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(afloat16, bfloat16, cfloat16, rozmiar * 4);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (float, 16MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(adouble1, bdouble1, cdouble1, rozmiar / 8);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (double, 1MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(adouble4, bdouble4, cdouble4, rozmiar / 2);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (double, 4MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(adouble8, bdouble8, cdouble8, rozmiar);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (double, 8MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
dodawanieCPU(adouble16, bdouble16, cdouble16, rozmiar * 2);
stopCPU = omp_get_wtime();
printf("Czas dodawania CPU (double, 16MiB) [ms]: %f\n\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(aint1, bint1, cint1, rozmiar / 4);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (int, 1MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(aint4, bint4, cint4, rozmiar);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (int, 4MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(aint8, bint8, cint8, rozmiar * 2);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (int, 8MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(aint16, bint16, cint16, rozmiar * 4);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (int, 16MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(afloat1, bfloat1, cfloat1, rozmiar / 4);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (float, 1MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(afloat4, bfloat4, cfloat4, rozmiar);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (float, 4MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(afloat8, bfloat8, cfloat8, rozmiar * 2);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (float, 8MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(afloat16, bfloat16, cfloat16, rozmiar * 4);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (float, 16MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(adouble1, bdouble1, cdouble1, rozmiar / 8);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (double, 1MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(adouble4, bdouble4, cdouble4, rozmiar / 2);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (double, 4MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(adouble8, bdouble8, cdouble8, rozmiar);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (double, 8MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozenieCPU(adouble16, bdouble16, cdouble16, rozmiar * 2);
stopCPU = omp_get_wtime();
printf("Czas mnozenia CPU (double, 16MiB) [ms]: %f\n\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(aint1, bint1, cint1, rozmiar / 4);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (int, 1MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(aint4, bint4, cint4, rozmiar);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (int, 4MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(aint8, bint8, cint8, rozmiar * 2);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (int, 8MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(aint16, bint16, cint16, rozmiar * 4);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (int, 16MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(afloat1, bfloat1, cfloat1, rozmiar / 4);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (float, 1MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(afloat4, bfloat4, cfloat4, rozmiar);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (float, 4MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(afloat8, bfloat8, cfloat8, rozmiar * 2);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (float, 8MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(afloat16, bfloat16, cfloat16, rozmiar * 4);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (float, 16MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(adouble1, bdouble1, cdouble1, rozmiar / 8);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (double, 1MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(adouble4, bdouble4, cdouble4, rozmiar / 2);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (double, 4MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(adouble8, bdouble8, cdouble8, rozmiar);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (double, 8MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
potegowanieCPU(adouble16, bdouble16, cdouble16, rozmiar * 2);
stopCPU = omp_get_wtime();
printf("Czas potegowania CPU (double, 16MiB) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
delete[] aint1;
delete[] aint4;
delete[] aint8;
delete[] aint16;
delete[] bint1;
delete[] bint4;
delete[] bint8;
delete[] bint16;
delete[] cint1;
delete[] cint4;
delete[] cint8;
delete[] cint16;
delete[] afloat1;
delete[] afloat4;
delete[] afloat8;
delete[] afloat16;
delete[] bfloat1;
delete[] bfloat4;
delete[] bfloat8;
delete[] bfloat16;
delete[] cfloat1;
delete[] cfloat4;
delete[] cfloat8;
delete[] cfloat16;
delete[] adouble1;
delete[] adouble4;
delete[] adouble8;
delete[] adouble16;
delete[] bdouble1;
delete[] bdouble4;
delete[] bdouble8;
delete[] bdouble16;
delete[] cdouble1;
delete[] cdouble4;
delete[] cdouble8;
delete[] cdouble16;
cudaFree(dev_aint1);
cudaFree(dev_aint4);
cudaFree(dev_aint8);
cudaFree(dev_aint16);
cudaFree(dev_bint1);
cudaFree(dev_bint4);
cudaFree(dev_bint8);
cudaFree(dev_bint16);
cudaFree(dev_cint1);
cudaFree(dev_cint4);
cudaFree(dev_cint8);
cudaFree(dev_cint16);
cudaFree(dev_afloat1);
cudaFree(dev_afloat4);
cudaFree(dev_afloat8);
cudaFree(dev_afloat16);
cudaFree(dev_bfloat1);
cudaFree(dev_bfloat4);
cudaFree(dev_bfloat8);
cudaFree(dev_bfloat16);
cudaFree(dev_cfloat1);
cudaFree(dev_cfloat4);
cudaFree(dev_cfloat8);
cudaFree(dev_cfloat16);
cudaFree(dev_adouble1);
cudaFree(dev_adouble4);
cudaFree(dev_adouble8);
cudaFree(dev_adouble16);
cudaFree(dev_bdouble1);
cudaFree(dev_bdouble4);
cudaFree(dev_bdouble8);
cudaFree(dev_bdouble16);
cudaFree(dev_cdouble1);
cudaFree(dev_cdouble4);
cudaFree(dev_cdouble8);
cudaFree(dev_cdouble16);
cudaDeviceReset();
return 0;
}
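/* CPU reference implementations used by the timing comparison above:
   element-wise addition (dodawanie), multiplication (mnozenie) and
   exponentiation by repeated multiplication (potegowanie) for int, float
   and double buffers of 'rozmiar' elements. */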
void dodawanieCPU(int *a, int *b, int *c, int rozmiar)
{
for (int i = 0; i < rozmiar; i++)
{
c[i] = a[i] + b[i];
}
}
void mnozenieCPU(int *a, int *b, int *c, int rozmiar)
{
for (int i = 0; i < rozmiar; i++)
{
c[i] = a[i] * b[i];
}
}
void potegowanieCPU(int *a, int *b, int *c, int rozmiar)
{
int wynik;
for (int i = 0; i < rozmiar; i++)
{
wynik = 1;
for (int j = 0; j < b[i]; j++)
{
wynik *= a[i];
}
c[i] = wynik;
}
}
void dodawanieCPU(float *a, float *b, float *c, int rozmiar)
{
for (int i = 0; i < rozmiar; i++)
{
c[i] = a[i] + b[i];
}
}
void mnozenieCPU(float *a, float *b, float *c, int rozmiar)
{
for (int i = 0; i < rozmiar; i++)
{
c[i] = a[i] * b[i];
}
}
void potegowanieCPU(float *a, float *b, float *c, int rozmiar)
{
float wynik;
for (int i = 0; i < rozmiar; i++)
{
wynik = 1;
for (float j = 0; j < b[i]; j++)
{
wynik *= a[i];
}
c[i] = wynik;
}
}
void dodawanieCPU(double *a, double *b, double *c, int rozmiar)
{
for (int i = 0; i < rozmiar; i++)
{
c[i] = a[i] + b[i];
}
}
void mnozenieCPU(double *a, double *b, double *c, int rozmiar)
{
for (int i = 0; i < rozmiar; i++)
{
c[i] = a[i] * b[i];
}
}
void potegowanieCPU(double *a, double *b, double *c, int rozmiar)
{
double wynik;
for (int i = 0; i < rozmiar; i++)
{
wynik = 1;
for (double j = 0; j < b[i]; j++)
{
wynik *= a[i];
}
c[i] = wynik;
}
}
#endif
#ifdef Zad4
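/* Zad4: naive 2-D matrix addition and multiplication kernels for float and
   double, benchmarked against single-threaded CPU versions for several
   data sizes (labelled 1/4/8/16). */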
__global__ void kernelDodawanieMacierzy(float *a, float *b, float *c, int rozmiar)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (i < rozmiar && j < rozmiar)
{
c[i * rozmiar + j] = a[i * rozmiar + j] + b[i * rozmiar + j];
}
}
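// Naive multiply: each thread computes one output element as a full row-by-column dot product.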
__global__ void kernelMnozenieMacierzy(float *a, float *b, float *c, int rozmiar)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
float wynik = 0;
if (i < rozmiar && j < rozmiar)
{
for (int k = 0; k < rozmiar; k++)
{
wynik += a[i * rozmiar + k] * b[k * rozmiar + j];
}
c[i * rozmiar + j] = wynik;
}
}
__global__ void kernelDodawanieMacierzy(double *a, double *b, double *c, int rozmiar)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (i < rozmiar && j < rozmiar)
{
c[i * rozmiar + j] = a[i * rozmiar + j] + b[i * rozmiar + j];
}
}
__global__ void kernelMnozenieMacierzy(double *a, double *b, double *c, int rozmiar)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
double wynik = 0;
if (i < rozmiar && j < rozmiar)
{
for (int k = 0; k < rozmiar; k++)
{
wynik += a[i * rozmiar + k] * b[k * rozmiar + j];
}
c[i * rozmiar + j] = wynik;
}
}
void dodawaniemacierzyCPU(float *a, float *b, float *c, int rozmiar);
void mnozeniemacierzyCPU(float *a, float *b, float *c, int rozmiar);
void dodawaniemacierzyCPU(double *a, double *b, double *c, int rozmiar);
void mnozeniemacierzyCPU(double *a, double *b, double *c, int rozmiar);
int main()
{
cudaSetDevice(0);
double startCPU, stopCPU;
const int rozmiar = 1024;
int liczbaBlokow, rozmiarBloku = ceil(sqrt(rozmiar));
float czasGPU;
cudaEvent_t startGPU, stopGPU;
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
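/* Every block below repeats the same pattern for a different data size:
   allocate host and device buffers, copy to the device, time the add and
   multiply kernels with CUDA events, time the CPU reference with
   omp_get_wtime(), then release the buffers. */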
float *afloat1 = new float[rozmiar * rozmiar / 4];
float *dev_afloat1;
cudaMalloc(&dev_afloat1, rozmiar * rozmiar * sizeof(float) / 4);
cudaMemcpy(dev_afloat1, afloat1, rozmiar * rozmiar * sizeof(float) / 4, cudaMemcpyHostToDevice);
float *bfloat1 = new float[rozmiar * rozmiar / 4];
float *dev_bfloat1;
cudaMalloc(&dev_bfloat1, rozmiar * rozmiar * sizeof(float) / 4);
cudaMemcpy(dev_bfloat1, bfloat1, rozmiar * rozmiar * sizeof(float) / 4, cudaMemcpyHostToDevice);
float *cfloat1 = new float[rozmiar * rozmiar / 4];
float *dev_cfloat1;
cudaMalloc(&dev_cfloat1, rozmiar * rozmiar * sizeof(float) / 4);
cudaMemcpy(dev_cfloat1, cfloat1, rozmiar * rozmiar * sizeof(float) / 4, cudaMemcpyHostToDevice);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar / 4 + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
cudaEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_afloat1, dev_bfloat1, dev_cfloat1, floor(sqrt(rozmiar * rozmiar / 4)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (float, 1) [ms]: %f\n", czasGPU);
cudaEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_afloat1, dev_bfloat1, dev_cfloat1, floor(sqrt(rozmiar * rozmiar / 4)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (float, 1) [ms]: %f\n", czasGPU);
startCPU = omp_get_wtime();
dodawaniemacierzyCPU(afloat1, bfloat1, cfloat1, ceil(rozmiar * rozmiar / 4));
stopCPU = omp_get_wtime();
printf("Czas dodawania macierzy CPU (float, 1) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozeniemacierzyCPU(afloat1, bfloat1, cfloat1, ceil(rozmiar * rozmiar / 4));
stopCPU = omp_get_wtime();
printf("Czas mnozenia macierzy CPU (float, 1) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
delete[] afloat1;
cudaFree(dev_afloat1);
delete[] bfloat1;
cudaFree(dev_bfloat1);
delete[] cfloat1;
cudaFree(dev_cfloat1);
float *afloat4 = new float[rozmiar * rozmiar];
float *dev_afloat4;
cudaMalloc(&dev_afloat4, rozmiar * rozmiar * sizeof(float));
cudaMemcpy(dev_afloat4, afloat4, rozmiar * rozmiar * sizeof(float), cudaMemcpyHostToDevice);
float *bfloat4 = new float[rozmiar * rozmiar];
float *dev_bfloat4;
cudaMalloc(&dev_bfloat4, rozmiar * rozmiar * sizeof(float));
cudaMemcpy(dev_bfloat4, bfloat4, rozmiar * rozmiar * sizeof(float), cudaMemcpyHostToDevice);
float *cfloat4 = new float[rozmiar * rozmiar];
float *dev_cfloat4;
cudaMalloc(&dev_cfloat4, rozmiar * rozmiar * sizeof(float));
cudaMemcpy(dev_cfloat4, cfloat4, rozmiar * rozmiar * sizeof(float), cudaMemcpyHostToDevice);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
cudaEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_afloat4, dev_bfloat4, dev_cfloat4, floor(sqrt(rozmiar * rozmiar)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (float, 4) [ms]: %f\n", czasGPU);
cudaEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_afloat4, dev_bfloat4, dev_cfloat4, floor(sqrt(rozmiar * rozmiar)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (float, 4) [ms]: %f\n", czasGPU);
startCPU = omp_get_wtime();
dodawaniemacierzyCPU(afloat4, bfloat4, cfloat4, ceil(rozmiar * rozmiar));
stopCPU = omp_get_wtime();
printf("Czas dodawania macierzy CPU (float, 4) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozeniemacierzyCPU(afloat4, bfloat4, cfloat4, ceil(rozmiar * rozmiar));
stopCPU = omp_get_wtime();
printf("Czas mnozenia macierzy CPU (float, 4) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
delete[] afloat4;
cudaFree(dev_afloat4);
delete[] bfloat4;
cudaFree(dev_bfloat4);
delete[] cfloat4;
cudaFree(dev_cfloat4);
float *afloat8 = new float[rozmiar * rozmiar * 2];
float *dev_afloat8;
cudaMalloc(&dev_afloat8, rozmiar * rozmiar * sizeof(float) * 2);
cudaMemcpy(dev_afloat8, afloat8, rozmiar * rozmiar * sizeof(float) * 2, cudaMemcpyHostToDevice);
float *bfloat8 = new float[rozmiar * rozmiar * 2];
float *dev_bfloat8;
cudaMalloc(&dev_bfloat8, rozmiar * rozmiar * sizeof(float) * 2);
cudaMemcpy(dev_bfloat8, bfloat8, rozmiar * rozmiar * sizeof(float) * 2, cudaMemcpyHostToDevice);
float *cfloat8 = new float[rozmiar * rozmiar * 2];
float *dev_cfloat8;
cudaMalloc(&dev_cfloat8, rozmiar * rozmiar * sizeof(float) * 2);
cudaMemcpy(dev_cfloat8, cfloat8, rozmiar * rozmiar * sizeof(float) * 2, cudaMemcpyHostToDevice);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar * 2 + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
cudaEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_afloat8, dev_bfloat8, dev_cfloat8, floor(sqrt(rozmiar * rozmiar * 2)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (float, 8) [ms]: %f\n", czasGPU);
cudaEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_afloat8, dev_bfloat8, dev_cfloat8, floor(sqrt(rozmiar * rozmiar * 2)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (float, 8) [ms]: %f\n", czasGPU);
startCPU = omp_get_wtime();
dodawaniemacierzyCPU(afloat8, bfloat8, cfloat8, ceil(rozmiar * rozmiar * 2));
stopCPU = omp_get_wtime();
printf("Czas dodawania macierzy CPU (float, 8) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozeniemacierzyCPU(afloat8, bfloat8, cfloat8, ceil(rozmiar * rozmiar * 2));
stopCPU = omp_get_wtime();
printf("Czas mnozenia macierzy CPU (float, 8) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
delete[] afloat8;
cudaFree(dev_afloat8);
delete[] bfloat8;
cudaFree(dev_bfloat8);
delete[] cfloat8;
cudaFree(dev_cfloat8);
float *afloat16 = new float[rozmiar * rozmiar * 4];
float *dev_afloat16;
cudaMalloc(&dev_afloat16, rozmiar * rozmiar * sizeof(float) * 4);
cudaMemcpy(dev_afloat16, afloat16, rozmiar * rozmiar * sizeof(float) * 4, cudaMemcpyHostToDevice);
float *bfloat16 = new float[rozmiar * rozmiar * 4];
float *dev_bfloat16;
cudaMalloc(&dev_bfloat16, rozmiar * rozmiar * sizeof(float) * 4);
cudaMemcpy(dev_bfloat16, bfloat16, rozmiar * rozmiar * sizeof(float) * 4, cudaMemcpyHostToDevice);
float *cfloat16 = new float[rozmiar * rozmiar * 4];
float *dev_cfloat16;
cudaMalloc(&dev_cfloat16, rozmiar * rozmiar * sizeof(float) * 4);
cudaMemcpy(dev_cfloat16, cfloat16, rozmiar * rozmiar * sizeof(float) * 4, cudaMemcpyHostToDevice);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar * 4 + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
cudaEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_afloat16, dev_bfloat16, dev_cfloat16, floor(sqrt(rozmiar * rozmiar * 4)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (float, 16) [ms]: %f\n", czasGPU);
cudaEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_afloat16, dev_bfloat16, dev_cfloat16, floor(sqrt(rozmiar * rozmiar * 4)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (float, 16) [ms]: %f\n", czasGPU);
startCPU = omp_get_wtime();
dodawaniemacierzyCPU(afloat16, bfloat16, cfloat16, ceil(rozmiar * rozmiar * 4));
stopCPU = omp_get_wtime();
printf("Czas dodawania macierzy CPU (float, 16) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozeniemacierzyCPU(afloat16, bfloat16, cfloat16, ceil(rozmiar * rozmiar * 4));
stopCPU = omp_get_wtime();
printf("Czas mnozenia macierzy CPU (float, 16) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
delete[] afloat16;
cudaFree(dev_afloat16);
delete[] bfloat16;
cudaFree(dev_bfloat16);
delete[] cfloat16;
cudaFree(dev_cfloat16);
double *adouble1 = new double[rozmiar * rozmiar / 8];
double *dev_adouble1;
cudaMalloc(&dev_adouble1, rozmiar * rozmiar * sizeof(double) / 8);
cudaMemcpy(dev_adouble1, adouble1, rozmiar * rozmiar * sizeof(double) / 8, cudaMemcpyHostToDevice);
double *bdouble1 = new double[rozmiar * rozmiar / 8];
double *dev_bdouble1;
cudaMalloc(&dev_bdouble1, rozmiar * rozmiar * sizeof(double) / 8);
cudaMemcpy(dev_bdouble1, bdouble1, rozmiar * rozmiar * sizeof(double) / 8, cudaMemcpyHostToDevice);
double *cdouble1 = new double[rozmiar * rozmiar / 8];
double *dev_cdouble1;
cudaMalloc(&dev_cdouble1, rozmiar * rozmiar * sizeof(double) / 8);
cudaMemcpy(dev_cdouble1, cdouble1, rozmiar * rozmiar * sizeof(double) / 8, cudaMemcpyHostToDevice);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar / 8 + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
cudaEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_adouble1, dev_bdouble1, dev_cdouble1, floor(sqrt(rozmiar * rozmiar / 8)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (double, 1) [ms]: %f\n", czasGPU);
cudaEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_adouble1, dev_bdouble1, dev_cdouble1, floor(sqrt(rozmiar * rozmiar / 8)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (double, 1) [ms]: %f\n", czasGPU);
startCPU = omp_get_wtime();
dodawaniemacierzyCPU(adouble1, bdouble1, cdouble1, ceil(rozmiar * rozmiar / 8));
stopCPU = omp_get_wtime();
printf("Czas dodawania macierzy CPU (double, 1) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozeniemacierzyCPU(adouble1, bdouble1, cdouble1, ceil(rozmiar * rozmiar / 8));
stopCPU = omp_get_wtime();
printf("Czas mnozenia macierzy CPU (double, 1) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
delete[] adouble1;
cudaFree(dev_adouble1);
delete[] bdouble1;
cudaFree(dev_bdouble1);
delete[] cdouble1;
cudaFree(dev_cdouble1);
double *adouble4 = new double[rozmiar * rozmiar / 2];
double *dev_adouble4;
cudaMalloc(&dev_adouble4, rozmiar * rozmiar * sizeof(double) / 2);
cudaMemcpy(dev_adouble4, adouble4, rozmiar * rozmiar * sizeof(double) / 2, cudaMemcpyHostToDevice);
double *bdouble4 = new double[rozmiar * rozmiar / 2];
double *dev_bdouble4;
cudaMalloc(&dev_bdouble4, rozmiar * rozmiar * sizeof(double) / 2);
cudaMemcpy(dev_bdouble4, bdouble4, rozmiar * rozmiar * sizeof(double) / 2, cudaMemcpyHostToDevice);
double *cdouble4 = new double[rozmiar * rozmiar / 2];
double *dev_cdouble4;
cudaMalloc(&dev_cdouble4, rozmiar * rozmiar * sizeof(double) / 2);
cudaMemcpy(dev_cdouble4, cdouble4, rozmiar * rozmiar * sizeof(double) / 2, cudaMemcpyHostToDevice);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar / 2 + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
cudaEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_adouble4, dev_bdouble4, dev_cdouble4, floor(sqrt(rozmiar * rozmiar / 2)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (double, 4) [ms]: %f\n", czasGPU);
cudaEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_adouble4, dev_bdouble4, dev_cdouble4, floor(sqrt(rozmiar * rozmiar / 2)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (double, 4) [ms]: %f\n", czasGPU);
startCPU = omp_get_wtime();
dodawaniemacierzyCPU(adouble4, bdouble4, cdouble4, ceil(rozmiar * rozmiar / 2));
stopCPU = omp_get_wtime();
printf("Czas dodawania macierzy CPU (double, 4) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozeniemacierzyCPU(adouble4, bdouble4, cdouble4, ceil(rozmiar * rozmiar / 2));
stopCPU = omp_get_wtime();
printf("Czas mnozenia macierzy CPU (double, 4) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
delete[] adouble4;
cudaFree(dev_adouble4);
delete[] bdouble4;
cudaFree(dev_bdouble4);
delete[] cdouble4;
cudaFree(dev_cdouble4);
double *adouble8 = new double[rozmiar * rozmiar];
double *dev_adouble8;
cudaMalloc(&dev_adouble8, rozmiar * rozmiar * sizeof(double));
cudaMemcpy(dev_adouble8, adouble8, rozmiar * rozmiar * sizeof(double), cudaMemcpyHostToDevice);
double *bdouble8 = new double[rozmiar * rozmiar];
double *dev_bdouble8;
cudaMalloc(&dev_bdouble8, rozmiar * rozmiar * sizeof(double));
cudaMemcpy(dev_bdouble8, bdouble8, rozmiar * rozmiar * sizeof(double), cudaMemcpyHostToDevice);
double *cdouble8 = new double[rozmiar * rozmiar];
double *dev_cdouble8;
cudaMalloc(&dev_cdouble8, rozmiar * rozmiar * sizeof(double));
cudaMemcpy(dev_cdouble8, cdouble8, rozmiar * rozmiar * sizeof(double), cudaMemcpyHostToDevice);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
cudaEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_adouble8, dev_bdouble8, dev_cdouble8, floor(sqrt(rozmiar * rozmiar)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (double, 8) [ms]: %f\n", czasGPU);
cudaEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_adouble8, dev_bdouble8, dev_cdouble8, floor(sqrt(rozmiar * rozmiar)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (double, 8) [ms]: %f\n", czasGPU);
startCPU = omp_get_wtime();
dodawaniemacierzyCPU(adouble8, bdouble8, cdouble8, ceil(rozmiar * rozmiar));
stopCPU = omp_get_wtime();
printf("Czas dodawania macierzy CPU (double, 8) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozeniemacierzyCPU(adouble8, bdouble8, cdouble8, ceil(rozmiar * rozmiar));
stopCPU = omp_get_wtime();
printf("Czas mnozenia macierzy CPU (double, 8) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
delete[] adouble8;
cudaFree(dev_adouble8);
delete[] bdouble8;
cudaFree(dev_bdouble8);
delete[] cdouble8;
cudaFree(dev_cdouble8);
double *adouble16 = new double[rozmiar * rozmiar * 2];
double *dev_adouble16;
cudaMalloc(&dev_adouble16, rozmiar * rozmiar * sizeof(double) * 2);
cudaMemcpy(dev_adouble16, adouble16, rozmiar * rozmiar * sizeof(double) * 2, cudaMemcpyHostToDevice);
double *bdouble16 = new double[rozmiar * rozmiar * 2];
double *dev_bdouble16;
cudaMalloc(&dev_bdouble16, rozmiar * rozmiar * sizeof(double) * 2);
cudaMemcpy(dev_bdouble16, bdouble16, rozmiar * rozmiar * sizeof(double) * 2, cudaMemcpyHostToDevice);
double *cdouble16 = new double[rozmiar * rozmiar * 2];
double *dev_cdouble16;
cudaMalloc(&dev_cdouble16, rozmiar * rozmiar * sizeof(double) * 2);
cudaMemcpy(dev_cdouble16, cdouble16, rozmiar * rozmiar * sizeof(double) * 2, cudaMemcpyHostToDevice);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar * 2 + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
cudaEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_adouble16, dev_bdouble16, dev_cdouble16, floor(sqrt(rozmiar * rozmiar * 2)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (double, 16) [ms]: %f\n", czasGPU);
cudaEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (dev_adouble16, dev_bdouble16, dev_cdouble16, floor(sqrt(rozmiar * rozmiar * 2)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (double, 16) [ms]: %f\n", czasGPU);
startCPU = omp_get_wtime();
dodawaniemacierzyCPU(adouble16, bdouble16, cdouble16, ceil(rozmiar * rozmiar * 2));
stopCPU = omp_get_wtime();
printf("Czas dodawania macierzy CPU (double, 16) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
startCPU = omp_get_wtime();
mnozeniemacierzyCPU(adouble16, bdouble16, cdouble16, ceil(rozmiar * rozmiar * 2));
stopCPU = omp_get_wtime();
printf("Czas mnozenia macierzy CPU (double, 16) [ms]: %f\n", 1000.0 * (stopCPU - startCPU));
delete[] adouble16;
cudaFree(dev_adouble16);
delete[] bdouble16;
cudaFree(dev_bdouble16);
delete[] cdouble16;
cudaFree(dev_cdouble16);
cudaDeviceReset();
return 0;
}
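/* CPU reference versions: the flat buffer of 'rozmiar' elements is treated as a
   square matrix whose side is floor(sqrt(rozmiar)) - 1, i.e. one row and one
   column fewer than the side length passed to the GPU kernels. */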
void dodawaniemacierzyCPU(float *a, float *b, float *c, int rozmiar)
{
int size = floor(sqrt(rozmiar)-1);
for (int i = 0; i < size; i++)
{
for (int j = 0; j < size; j++)
{
c[i * size + j] = a[i * size + j] + b[i * size + j];
}
}
}
void mnozeniemacierzyCPU(float *a, float *b, float *c, int rozmiar)
{
int size = floor(sqrt(rozmiar)-1);
for (int i = 0; i < size; i++)
{
for (int j = 0; j < size; j++)
{
c[i * size + j] = 0;
for (int k = 0; k < size; k++)
{
c[i * size + j] += a[i * size + k] * b[k * size + j];
}
}
}
}
void dodawaniemacierzyCPU(double *a, double *b, double *c, int rozmiar)
{
int size = floor(sqrt(rozmiar)-1);
for (int i = 0; i < size; i++)
{
for (int j = 0; j < size; j++)
{
c[i * size + j] = a[i * size + j] + b[i * size + j];
}
}
}
void mnozeniemacierzyCPU(double *a, double *b, double *c, int rozmiar)
{
int size = floor(sqrt(rozmiar)-1);
for (int i = 0; i < size; i++)
{
for (int j = 0; j < size; j++)
{
c[i * size + j] = 0;
for (int k = 0; k < size; k++)
{
c[i * size + j] += a[i * size + k] * b[k * size + j];
}
}
}
}
#endif
#ifdef Zad5
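/* Zad5: the same matrix add/multiply kernels, but the input operands are read
   through 1-D linear texture objects instead of plain global-memory pointers. */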
__global__ void kernelDodawanieMacierzy(cudaTextureObject_t tex, cudaTextureObject_t tex2, float *c, int rozmiar)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (i < rozmiar && j < rozmiar)
{
// fetch the operands through the texture objects only for in-range threads
float a = tex1Dfetch<float>(tex, i * rozmiar + j);
float b = tex1Dfetch<float>(tex2, i * rozmiar + j);
c[i * rozmiar + j] = a + b;
}
}
__global__ void kernelMnozenieMacierzy(cudaTextureObject_t tex, cudaTextureObject_t tex2, float *c, int rozmiar)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
float wynik = 0;
if (i < rozmiar && j < rozmiar)
{
for (int k = 0; k < rozmiar; k++)
{
wynik += tex1Dfetch<float>(tex, i * rozmiar + k) * tex1Dfetch<float>(tex2, k * rozmiar + j);
}
c[i * rozmiar + j] = wynik;
}
}
int main()
{
cudaSetDevice(0);
const int rozmiar = 1024;
int liczbaBlokow, rozmiarBloku = ceil(sqrt(rozmiar));
float czasGPU;
cudaEvent_t startGPU, stopGPU;
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
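/* For each size: allocate linear device buffers, describe the two input buffers
   as cudaResourceTypeLinear resources, create texture objects over them, and
   time the add and multiply kernels; the result is written to a plain device buffer. */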
float *afloat1_buffer;
cudaMalloc(&afloat1_buffer, rozmiar * rozmiar * sizeof(float) / 4);
float *bfloat1_buffer;
cudaMalloc(&bfloat1_buffer, rozmiar * rozmiar * sizeof(float) / 4);
float *cfloat1_buffer;
cudaMalloc(&cfloat1_buffer, rozmiar * rozmiar * sizeof(float) / 4);
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = afloat1_buffer;
resDesc.res.linear.desc.f = cudaChannelFormatKindFloat;
resDesc.res.linear.desc.x = 32;
resDesc.res.linear.sizeInBytes = rozmiar * rozmiar * sizeof(float) / 4;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = cudaReadModeElementType;
cudaTextureObject_t tex = 0;
cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
cudaResourceDesc resDesc2;
memset(&resDesc2, 0, sizeof(resDesc2));
resDesc2.resType = cudaResourceTypeLinear;
resDesc2.res.linear.devPtr = bfloat1_buffer;
resDesc2.res.linear.desc.f = cudaChannelFormatKindFloat;
resDesc2.res.linear.desc.x = 32;
resDesc2.res.linear.sizeInBytes = rozmiar * rozmiar * sizeof(float) / 4;
cudaTextureDesc texDesc2;
memset(&texDesc2, 0, sizeof(texDesc2));
texDesc2.readMode = cudaReadModeElementType;
cudaTextureObject_t tex2 = 0;
cudaCreateTextureObject(&tex2, &resDesc2, &texDesc2, NULL);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar / 4 + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
cudaEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (tex, tex2, cfloat1_buffer, floor(sqrt(rozmiar * rozmiar / 4)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (float, 1) [ms]: %f\n", czasGPU);
cudaEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (tex, tex2, cfloat1_buffer, floor(sqrt(rozmiar * rozmiar / 4)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (float, 1) [ms]: %f\n", czasGPU);
cudaFree(afloat1_buffer);
cudaFree(bfloat1_buffer);
cudaFree(cfloat1_buffer);
float *afloat8_buffer;
cudaMalloc(&afloat8_buffer, rozmiar * rozmiar * sizeof(float) * 2);
float *bfloat8_buffer;
cudaMalloc(&bfloat8_buffer, rozmiar * rozmiar * sizeof(float) * 2);
float *cfloat8_buffer;
cudaMalloc(&cfloat8_buffer, rozmiar * rozmiar * sizeof(float) * 2);
resDesc.res.linear.devPtr = afloat8_buffer;
resDesc.res.linear.sizeInBytes = rozmiar * rozmiar * sizeof(float) * 2;
cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
resDesc2.res.linear.devPtr = bfloat8_buffer;
resDesc2.res.linear.sizeInBytes = rozmiar * rozmiar * sizeof(float) * 2;
cudaCreateTextureObject(&tex2, &resDesc2, &texDesc2, NULL);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar * 2 + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
cudaEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (tex, tex2, cfloat8_buffer, floor(sqrt(rozmiar * rozmiar * 2)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (float, 8) [ms]: %f\n", czasGPU);
cudaEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (tex, tex2, cfloat8_buffer, floor(sqrt(rozmiar * rozmiar * 2)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (float, 8) [ms]: %f\n", czasGPU);
cudaFree(afloat8_buffer);
cudaFree(bfloat8_buffer);
cudaFree(cfloat8_buffer);
float *afloat96_buffer;
cudaMalloc(&afloat96_buffer, rozmiar * rozmiar * sizeof(float) * 24);
float *bfloat96_buffer;
cudaMalloc(&bfloat96_buffer, rozmiar * rozmiar * sizeof(float) * 24);
float *cfloat96_buffer;
cudaMalloc(&cfloat96_buffer, rozmiar * rozmiar * sizeof(float) * 24);
resDesc.res.linear.devPtr = afloat96_buffer;
resDesc.res.linear.sizeInBytes = rozmiar * rozmiar * sizeof(float) * 24;
cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
resDesc2.res.linear.devPtr = bfloat96_buffer;
resDesc2.res.linear.sizeInBytes = rozmiar * rozmiar * sizeof(float) * 24;
cudaCreateTextureObject(&tex2, &resDesc2, &texDesc2, NULL);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar * 24 + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
cudaEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (tex, tex2, cfloat96_buffer, floor(sqrt(rozmiar * rozmiar * 24)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (float, 96) [ms]: %f\n", czasGPU);
cudaEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (tex, tex2, cfloat96_buffer, floor(sqrt(rozmiar * rozmiar * 24)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (float, 96) [ms]: %f\n", czasGPU);
cudaFree(afloat96_buffer);
cudaFree(bfloat96_buffer);
cudaFree(cfloat96_buffer);
float *afloat256_buffer;
cudaMalloc(&afloat256_buffer, rozmiar * rozmiar * sizeof(float) * 65);
float *bfloat256_buffer;
cudaMalloc(&bfloat256_buffer, rozmiar * rozmiar * sizeof(float) * 65);
float *cfloat256_buffer;
cudaMalloc(&cfloat256_buffer, rozmiar * rozmiar * sizeof(float) * 65);
resDesc.res.linear.devPtr = afloat256_buffer;
resDesc.res.linear.sizeInBytes = rozmiar * rozmiar * sizeof(float) * 65;
cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
resDesc2.res.linear.devPtr = bfloat256_buffer;
resDesc2.res.linear.sizeInBytes = rozmiar * rozmiar * sizeof(float) * 65;
cudaCreateTextureObject(&tex2, &resDesc2, &texDesc2, NULL);
liczbaBlokow = ceil(sqrt((rozmiar * rozmiar * 65 + rozmiar - 1) / rozmiar));
rozmiarBloku = ceil(sqrt(rozmiar));
cudaEventRecord(startGPU, 0);
kernelDodawanieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (tex, tex2, cfloat256_buffer, floor(sqrt(rozmiar * rozmiar * 65)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas dodawania macierzy GPU (float, 256) [ms]: %f\n", czasGPU);
cudaEventRecord(startGPU, 0);
kernelMnozenieMacierzy << <dim3(liczbaBlokow, liczbaBlokow), dim3(rozmiarBloku, rozmiarBloku) >> > (tex, tex2, cfloat256_buffer, floor(sqrt(rozmiar * rozmiar * 65)));
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaDeviceSynchronize();
cudaEventElapsedTime(&czasGPU, startGPU, stopGPU);
printf("Czas mnozenia macierzy GPU (float, 256) [ms]: %f\n", czasGPU);
cudaFree(afloat256_buffer);
cudaFree(bfloat256_buffer);
cudaFree(cfloat256_buffer);
cudaDeviceReset();
return 0;
}
#endif
|
056d8f861963723e4bd612ff55c465ba37622101.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <float.h>
#include <fstream>
#include <string>
#include <cstring>
#include <sstream>
#include <iostream>
#include <chrono>
#include <hip/hip_runtime.h>
using namespace std::chrono;
typedef high_resolution_clock myclock;
typedef duration<float> myduration;
#define MAX_WG_SIZE 256
template <typename T>
T* mem_alloc (const int align, const size_t size) {
return (T*) aligned_alloc(align, size * sizeof(T));
}
template <typename T>
void mem_free (T* p) {
free(p);
}
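// Approximation of the log-gamma function used by the pairwise score below
// (log(exp(1.0f)) is simply 1).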
__device__
float gammafunction(unsigned int n)
{
if(n == 0)
return 0.0f;
float x = ((float)n + 0.5f) * log((float) n) -
((float)n - 1.0f) * log(exp(1.0f));
return x;
}
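/* Epistasis detection kernel: each thread scores one SNP pair (i, j) with j > i.
   Genotypes are bit-packed (32 samples per word) and split by phenotype class;
   the two 9-cell contingency tables are built with bitwise ops and __popc, then
   combined into a log-gamma-based association score. The host later selects the
   pair with the minimum score. */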
__global__ void epi(const unsigned int* dev_data_zeros,
const unsigned int* dev_data_ones,
float* dev_scores,
const int num_snp,
const int PP_zeros,
const int PP_ones,
const int mask_zeros,
const int mask_ones)
{
int i, j, tid, p, k;
float score = FLT_MAX;
j = blockDim.x * blockIdx.x + threadIdx.x;
i = blockDim.y * blockIdx.y + threadIdx.y;
tid = i * num_snp + j;
if (j > i && i < num_snp && j < num_snp) {
unsigned int ft[2 * 9];
for(k = 0; k < 2 * 9; k++) ft[k] = 0;
unsigned int t00, t01, t02, t10, t11, t12, t20, t21, t22;
unsigned int di2, dj2;
unsigned int* SNPi;
unsigned int* SNPj;
// Phenotype 0
SNPi = (unsigned int*) &dev_data_zeros[i * 2];
SNPj = (unsigned int*) &dev_data_zeros[j * 2];
for (p = 0; p < 2 * PP_zeros * num_snp - 2 * num_snp; p += 2 * num_snp) {
di2 = ~(SNPi[p] | SNPi[p + 1]);
dj2 = ~(SNPj[p] | SNPj[p + 1]);
t00 = SNPi[p] & SNPj[p];
t01 = SNPi[p] & SNPj[p + 1];
t02 = SNPi[p] & dj2;
t10 = SNPi[p + 1] & SNPj[p];
t11 = SNPi[p + 1] & SNPj[p + 1];
t12 = SNPi[p + 1] & dj2;
t20 = di2 & SNPj[p];
t21 = di2 & SNPj[p + 1];
t22 = di2 & dj2;
ft[0] += __popc(t00);
ft[1] += __popc(t01);
ft[2] += __popc(t02);
ft[3] += __popc(t10);
ft[4] += __popc(t11);
ft[5] += __popc(t12);
ft[6] += __popc(t20);
ft[7] += __popc(t21);
ft[8] += __popc(t22);
}
// remainder
p = 2 * PP_zeros * num_snp - 2 * num_snp;
di2 = ~(SNPi[p] | SNPi[p + 1]);
dj2 = ~(SNPj[p] | SNPj[p + 1]);
di2 = di2 & mask_zeros;
dj2 = dj2 & mask_zeros;
t00 = SNPi[p] & SNPj[p];
t01 = SNPi[p] & SNPj[p + 1];
t02 = SNPi[p] & dj2;
t10 = SNPi[p + 1] & SNPj[p];
t11 = SNPi[p + 1] & SNPj[p + 1];
t12 = SNPi[p + 1] & dj2;
t20 = di2 & SNPj[p];
t21 = di2 & SNPj[p + 1];
t22 = di2 & dj2;
ft[0] += __popc(t00);
ft[1] += __popc(t01);
ft[2] += __popc(t02);
ft[3] += __popc(t10);
ft[4] += __popc(t11);
ft[5] += __popc(t12);
ft[6] += __popc(t20);
ft[7] += __popc(t21);
ft[8] += __popc(t22);
// Phenotype 1
SNPi = (unsigned int*) &dev_data_ones[i * 2];
SNPj = (unsigned int*) &dev_data_ones[j * 2];
for(p = 0; p < 2 * PP_ones * num_snp - 2 * num_snp; p += 2 * num_snp)
{
di2 = ~(SNPi[p] | SNPi[p + 1]);
dj2 = ~(SNPj[p] | SNPj[p + 1]);
t00 = SNPi[p] & SNPj[p];
t01 = SNPi[p] & SNPj[p + 1];
t02 = SNPi[p] & dj2;
t10 = SNPi[p + 1] & SNPj[p];
t11 = SNPi[p + 1] & SNPj[p + 1];
t12 = SNPi[p + 1] & dj2;
t20 = di2 & SNPj[p];
t21 = di2 & SNPj[p + 1];
t22 = di2 & dj2;
ft[9] += __popc(t00);
ft[10] += __popc(t01);
ft[11] += __popc(t02);
ft[12] += __popc(t10);
ft[13] += __popc(t11);
ft[14] += __popc(t12);
ft[15] += __popc(t20);
ft[16] += __popc(t21);
ft[17] += __popc(t22);
}
p = 2 * PP_ones * num_snp - 2 * num_snp;
di2 = ~(SNPi[p] | SNPi[p + 1]);
dj2 = ~(SNPj[p] | SNPj[p + 1]);
di2 = di2 & mask_ones;
dj2 = dj2 & mask_ones;
t00 = SNPi[p] & SNPj[p];
t01 = SNPi[p] & SNPj[p + 1];
t02 = SNPi[p] & dj2;
t10 = SNPi[p + 1] & SNPj[p];
t11 = SNPi[p + 1] & SNPj[p + 1];
t12 = SNPi[p + 1] & dj2;
t20 = di2 & SNPj[p];
t21 = di2 & SNPj[p + 1];
t22 = di2 & dj2;
ft[9] += __popc(t00);
ft[10] += __popc(t01);
ft[11] += __popc(t02);
ft[12] += __popc(t10);
ft[13] += __popc(t11);
ft[14] += __popc(t12);
ft[15] += __popc(t20);
ft[16] += __popc(t21);
ft[17] += __popc(t22);
// compute score
score = 0.0f;
for(k = 0; k < 9; k++)
score += gammafunction(ft[k] + ft[9 + k] + 1) - gammafunction(ft[k]) - gammafunction(ft[9 + k]);
score = fabs(score);
if(score == 0.0f)
score = FLT_MAX;
dev_scores[tid] = score;
}
}
int main(int argc, char **argv)
{
int i, j, x;
int num_pac = atoi(argv[1]); // #samples
int num_snp = atoi(argv[2]); // #SNPs
int iteration = atoi(argv[3]);// #kernel run
int block_snp = 64;
srand(100);
unsigned char *SNP_Data = mem_alloc<unsigned char>(64, num_pac * num_snp);
unsigned char *Ph_Data = mem_alloc<unsigned char>(64, num_pac);
// generate SNPs between 0 and 2
for (i = 0; i < num_pac; i++)
for(j = 0; j < num_snp; j++)
SNP_Data[i * num_snp + j] = rand() % 3;
// generate phenotype between 0 and 1
for(int i = 0; i < num_pac; i++) Ph_Data[i] = rand() % 2;
// transpose the SNP data
unsigned char *SNP_Data_trans = mem_alloc<unsigned char>(64, num_pac * num_snp);
for (i = 0; i < num_pac; i++)
for(j = 0; j < num_snp; j++)
SNP_Data_trans[j * num_pac + i] = SNP_Data[i * num_snp + j];
int phen_ones = 0;
for(i = 0; i < num_pac; i++)
if(Ph_Data[i] == 1)
phen_ones++;
// transform SNP data to a binary format
int PP_zeros = ceil((1.0*(num_pac - phen_ones))/32.0);
int PP_ones = ceil((1.0*phen_ones)/32.0);
unsigned int *bin_data_zeros = mem_alloc<unsigned int>(64, num_snp * PP_zeros * 2);
unsigned int *bin_data_ones = mem_alloc<unsigned int>(64, num_snp * PP_ones * 2);
memset(bin_data_zeros, 0, num_snp*PP_zeros*2*sizeof(unsigned int));
memset(bin_data_ones, 0, num_snp*PP_ones*2*sizeof(unsigned int));
for(i = 0; i < num_snp; i++)
{
int x_zeros = -1;
int x_ones = -1;
int n_zeros = 0;
int n_ones = 0;
for(j = 0; j < num_pac; j++){
unsigned int temp = (unsigned int) SNP_Data_trans[i * num_pac + j];
if(Ph_Data[j] == 1){
if(n_ones%32 == 0){
x_ones ++;
}
// apply 1 shift left to 2 components
bin_data_ones[i * PP_ones * 2 + x_ones*2 + 0] <<= 1;
bin_data_ones[i * PP_ones * 2 + x_ones*2 + 1] <<= 1;
// insert '1' in correct component
if(temp == 0 || temp == 1){
bin_data_ones[i * PP_ones * 2 + x_ones*2 + temp ] |= 1;
}
n_ones ++;
} else {
if(n_zeros%32 == 0){
x_zeros ++;
}
// apply 1 shift left to 2 components
bin_data_zeros[i * PP_zeros * 2 + x_zeros*2 + 0] <<= 1;
bin_data_zeros[i * PP_zeros * 2 + x_zeros*2 + 1] <<= 1;
// insert '1' in correct component
if(temp == 0 || temp == 1){
bin_data_zeros[i * PP_zeros * 2 + x_zeros*2 + temp] |= 1;
}
n_zeros ++;
}
}
}
unsigned int mask_zeros = 0xFFFFFFFF;
for(int x = num_pac - phen_ones; x < PP_zeros * 32; x++)
mask_zeros = mask_zeros >> 1;
unsigned int mask_ones = 0xFFFFFFFF;
for(x = phen_ones; x < PP_ones * 32; x++)
mask_ones = mask_ones >> 1;
// transpose the binary data structures
unsigned int* bin_data_ones_trans = mem_alloc<unsigned int>(64, num_snp * PP_ones * 2);
for(i = 0; i < num_snp; i++)
for(j = 0; j < PP_ones; j++)
{
bin_data_ones_trans[(j * num_snp + i) * 2 + 0] = bin_data_ones[(i * PP_ones + j) * 2 + 0];
bin_data_ones_trans[(j * num_snp + i) * 2 + 1] = bin_data_ones[(i * PP_ones + j) * 2 + 1];
}
unsigned int* bin_data_zeros_trans = mem_alloc<unsigned int>(64, num_snp * PP_zeros * 2);
for(i = 0; i < num_snp; i++)
for(j = 0; j < PP_zeros; j++)
{
bin_data_zeros_trans[(j * num_snp + i) * 2 + 0] = bin_data_zeros[(i * PP_zeros + j) * 2 + 0];
bin_data_zeros_trans[(j * num_snp + i) * 2 + 1] = bin_data_zeros[(i * PP_zeros + j) * 2 + 1];
}
float *scores = mem_alloc<float>(64, num_snp * num_snp);
for(x = 0; x < num_snp * num_snp; x++) scores[x] = FLT_MAX;
auto start = myclock::now();
unsigned int* d_data_zeros;
hipMalloc((void**)&d_data_zeros, num_snp*PP_zeros*2*sizeof(unsigned int));
hipMemcpy(d_data_zeros, bin_data_zeros_trans,
num_snp*PP_zeros*2*sizeof(unsigned int), hipMemcpyHostToDevice);
unsigned int* d_data_ones;
hipMalloc((void**)&d_data_ones, num_snp*PP_ones*2*sizeof(unsigned int));
hipMemcpy(d_data_ones, bin_data_ones_trans,
num_snp*PP_ones*2*sizeof(unsigned int), hipMemcpyHostToDevice);
float* d_scores;
hipMalloc((void**)&d_scores, num_snp*num_snp*sizeof(float));
// setup kernel ND-range
int num_snp_m = num_snp;
while(num_snp_m % block_snp != 0) num_snp_m++;
dim3 grid(num_snp_m / block_snp, num_snp_m);
dim3 block(block_snp, 1);
// epistasis detection kernel
for (int i = 0; i < iteration; i++) {
hipMemcpy(d_scores, scores, sizeof(float) * num_snp * num_snp, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( epi), dim3(grid), dim3(block), 0, 0, d_data_zeros, d_data_ones, d_scores, num_snp,
PP_zeros, PP_ones, mask_zeros, mask_ones);
}
hipMemcpy(scores, d_scores, sizeof(float) * num_snp * num_snp, hipMemcpyDeviceToHost);
auto end = myclock::now();
myduration elapsed = end - start;
std::cout << "Total offloading time: " << elapsed.count() << " sec" << std::endl;
// compute the minimum score on a host
float score = scores[0];
int solution = 0;
for (int i = 1; i < num_snp * num_snp; i++) {
if (score > scores[i]) {
score = scores[i];
solution = i;
}
}
std::cout << "Score: " << score << std::endl;
std::cout << "Solution: " << solution / num_snp << ", " << solution % num_snp << std::endl;
if ( (fabsf(score - 83.844f) > 1e-3f) || (solution / num_snp != 1253) ||
(solution % num_snp != 25752) )
std::cout << "FAIL\n";
else
std::cout << "PASS\n";
hipFree(d_data_zeros);
hipFree(d_data_ones);
hipFree(d_scores);
mem_free(bin_data_zeros);
mem_free(bin_data_ones);
mem_free(bin_data_zeros_trans);
mem_free(bin_data_ones_trans);
mem_free(scores);
mem_free(SNP_Data);
mem_free(SNP_Data_trans);
mem_free(Ph_Data);
return 0;
}
| 056d8f861963723e4bd612ff55c465ba37622101.cu |
#include <math.h>
#include <float.h>
#include <fstream>
#include <string>
#include <cstring>
#include <sstream>
#include <iostream>
#include <chrono>
#include <cuda.h>
using namespace std::chrono;
typedef high_resolution_clock myclock;
typedef duration<float> myduration;
#define MAX_WG_SIZE 256
template <typename T>
T* mem_alloc (const int align, const size_t size) {
return (T*) aligned_alloc(align, size * sizeof(T));
}
template <typename T>
void mem_free (T* p) {
free(p);
}
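// Approximation of the log-gamma function used by the pairwise score below
// (log(exp(1.0f)) is simply 1).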
__device__
float gammafunction(unsigned int n)
{
if(n == 0)
return 0.0f;
float x = ((float)n + 0.5f) * log((float) n) -
((float)n - 1.0f) * log(exp(1.0f));
return x;
}
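/* Epistasis detection kernel: each thread scores one SNP pair (i, j) with j > i.
   Genotypes are bit-packed (32 samples per word) and split by phenotype class;
   the two 9-cell contingency tables are built with bitwise ops and __popc, then
   combined into a log-gamma-based association score. The host later selects the
   pair with the minimum score. */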
__global__ void epi(const unsigned int* dev_data_zeros,
const unsigned int* dev_data_ones,
float* dev_scores,
const int num_snp,
const int PP_zeros,
const int PP_ones,
const int mask_zeros,
const int mask_ones)
{
int i, j, tid, p, k;
float score = FLT_MAX;
j = blockDim.x * blockIdx.x + threadIdx.x;
i = blockDim.y * blockIdx.y + threadIdx.y;
tid = i * num_snp + j;
if (j > i && i < num_snp && j < num_snp) {
unsigned int ft[2 * 9];
for(k = 0; k < 2 * 9; k++) ft[k] = 0;
unsigned int t00, t01, t02, t10, t11, t12, t20, t21, t22;
unsigned int di2, dj2;
unsigned int* SNPi;
unsigned int* SNPj;
// Phenotype 0
SNPi = (unsigned int*) &dev_data_zeros[i * 2];
SNPj = (unsigned int*) &dev_data_zeros[j * 2];
for (p = 0; p < 2 * PP_zeros * num_snp - 2 * num_snp; p += 2 * num_snp) {
di2 = ~(SNPi[p] | SNPi[p + 1]);
dj2 = ~(SNPj[p] | SNPj[p + 1]);
t00 = SNPi[p] & SNPj[p];
t01 = SNPi[p] & SNPj[p + 1];
t02 = SNPi[p] & dj2;
t10 = SNPi[p + 1] & SNPj[p];
t11 = SNPi[p + 1] & SNPj[p + 1];
t12 = SNPi[p + 1] & dj2;
t20 = di2 & SNPj[p];
t21 = di2 & SNPj[p + 1];
t22 = di2 & dj2;
ft[0] += __popc(t00);
ft[1] += __popc(t01);
ft[2] += __popc(t02);
ft[3] += __popc(t10);
ft[4] += __popc(t11);
ft[5] += __popc(t12);
ft[6] += __popc(t20);
ft[7] += __popc(t21);
ft[8] += __popc(t22);
}
// remainder
p = 2 * PP_zeros * num_snp - 2 * num_snp;
di2 = ~(SNPi[p] | SNPi[p + 1]);
dj2 = ~(SNPj[p] | SNPj[p + 1]);
di2 = di2 & mask_zeros;
dj2 = dj2 & mask_zeros;
t00 = SNPi[p] & SNPj[p];
t01 = SNPi[p] & SNPj[p + 1];
t02 = SNPi[p] & dj2;
t10 = SNPi[p + 1] & SNPj[p];
t11 = SNPi[p + 1] & SNPj[p + 1];
t12 = SNPi[p + 1] & dj2;
t20 = di2 & SNPj[p];
t21 = di2 & SNPj[p + 1];
t22 = di2 & dj2;
ft[0] += __popc(t00);
ft[1] += __popc(t01);
ft[2] += __popc(t02);
ft[3] += __popc(t10);
ft[4] += __popc(t11);
ft[5] += __popc(t12);
ft[6] += __popc(t20);
ft[7] += __popc(t21);
ft[8] += __popc(t22);
// Phenotype 1
SNPi = (unsigned int*) &dev_data_ones[i * 2];
SNPj = (unsigned int*) &dev_data_ones[j * 2];
for(p = 0; p < 2 * PP_ones * num_snp - 2 * num_snp; p += 2 * num_snp)
{
di2 = ~(SNPi[p] | SNPi[p + 1]);
dj2 = ~(SNPj[p] | SNPj[p + 1]);
t00 = SNPi[p] & SNPj[p];
t01 = SNPi[p] & SNPj[p + 1];
t02 = SNPi[p] & dj2;
t10 = SNPi[p + 1] & SNPj[p];
t11 = SNPi[p + 1] & SNPj[p + 1];
t12 = SNPi[p + 1] & dj2;
t20 = di2 & SNPj[p];
t21 = di2 & SNPj[p + 1];
t22 = di2 & dj2;
ft[9] += __popc(t00);
ft[10] += __popc(t01);
ft[11] += __popc(t02);
ft[12] += __popc(t10);
ft[13] += __popc(t11);
ft[14] += __popc(t12);
ft[15] += __popc(t20);
ft[16] += __popc(t21);
ft[17] += __popc(t22);
}
p = 2 * PP_ones * num_snp - 2 * num_snp;
di2 = ~(SNPi[p] | SNPi[p + 1]);
dj2 = ~(SNPj[p] | SNPj[p + 1]);
di2 = di2 & mask_ones;
dj2 = dj2 & mask_ones;
t00 = SNPi[p] & SNPj[p];
t01 = SNPi[p] & SNPj[p + 1];
t02 = SNPi[p] & dj2;
t10 = SNPi[p + 1] & SNPj[p];
t11 = SNPi[p + 1] & SNPj[p + 1];
t12 = SNPi[p + 1] & dj2;
t20 = di2 & SNPj[p];
t21 = di2 & SNPj[p + 1];
t22 = di2 & dj2;
ft[9] += __popc(t00);
ft[10] += __popc(t01);
ft[11] += __popc(t02);
ft[12] += __popc(t10);
ft[13] += __popc(t11);
ft[14] += __popc(t12);
ft[15] += __popc(t20);
ft[16] += __popc(t21);
ft[17] += __popc(t22);
// compute score
score = 0.0f;
for(k = 0; k < 9; k++)
score += gammafunction(ft[k] + ft[9 + k] + 1) - gammafunction(ft[k]) - gammafunction(ft[9 + k]);
score = fabs(score);
if(score == 0.0f)
score = FLT_MAX;
dev_scores[tid] = score;
}
}
int main(int argc, char **argv)
{
int i, j, x;
int num_pac = atoi(argv[1]); // #samples
int num_snp = atoi(argv[2]); // #SNPs
int iteration = atoi(argv[3]);// #kernel run
int block_snp = 64;
srand(100);
unsigned char *SNP_Data = mem_alloc<unsigned char>(64, num_pac * num_snp);
unsigned char *Ph_Data = mem_alloc<unsigned char>(64, num_pac);
// generate SNPs between 0 and 2
for (i = 0; i < num_pac; i++)
for(j = 0; j < num_snp; j++)
SNP_Data[i * num_snp + j] = rand() % 3;
// generate phenotype between 0 and 1
for(int i = 0; i < num_pac; i++) Ph_Data[i] = rand() % 2;
// transpose the SNP data
unsigned char *SNP_Data_trans = mem_alloc<unsigned char>(64, num_pac * num_snp);
for (i = 0; i < num_pac; i++)
for(j = 0; j < num_snp; j++)
SNP_Data_trans[j * num_pac + i] = SNP_Data[i * num_snp + j];
int phen_ones = 0;
for(i = 0; i < num_pac; i++)
if(Ph_Data[i] == 1)
phen_ones++;
// transform SNP data to a binary format
int PP_zeros = ceil((1.0*(num_pac - phen_ones))/32.0);
int PP_ones = ceil((1.0*phen_ones)/32.0);
unsigned int *bin_data_zeros = mem_alloc<unsigned int>(64, num_snp * PP_zeros * 2);
unsigned int *bin_data_ones = mem_alloc<unsigned int>(64, num_snp * PP_ones * 2);
memset(bin_data_zeros, 0, num_snp*PP_zeros*2*sizeof(unsigned int));
memset(bin_data_ones, 0, num_snp*PP_ones*2*sizeof(unsigned int));
for(i = 0; i < num_snp; i++)
{
int x_zeros = -1;
int x_ones = -1;
int n_zeros = 0;
int n_ones = 0;
for(j = 0; j < num_pac; j++){
unsigned int temp = (unsigned int) SNP_Data_trans[i * num_pac + j];
if(Ph_Data[j] == 1){
if(n_ones%32 == 0){
x_ones ++;
}
// apply 1 shift left to 2 components
bin_data_ones[i * PP_ones * 2 + x_ones*2 + 0] <<= 1;
bin_data_ones[i * PP_ones * 2 + x_ones*2 + 1] <<= 1;
// insert '1' in correct component
if(temp == 0 || temp == 1){
bin_data_ones[i * PP_ones * 2 + x_ones*2 + temp ] |= 1;
}
n_ones ++;
} else {
if(n_zeros%32 == 0){
x_zeros ++;
}
// apply 1 shift left to 2 components
bin_data_zeros[i * PP_zeros * 2 + x_zeros*2 + 0] <<= 1;
bin_data_zeros[i * PP_zeros * 2 + x_zeros*2 + 1] <<= 1;
// insert '1' in correct component
if(temp == 0 || temp == 1){
bin_data_zeros[i * PP_zeros * 2 + x_zeros*2 + temp] |= 1;
}
n_zeros ++;
}
}
}
unsigned int mask_zeros = 0xFFFFFFFF;
for(int x = num_pac - phen_ones; x < PP_zeros * 32; x++)
mask_zeros = mask_zeros >> 1;
unsigned int mask_ones = 0xFFFFFFFF;
for(x = phen_ones; x < PP_ones * 32; x++)
mask_ones = mask_ones >> 1;
// transpose the binary data structures
unsigned int* bin_data_ones_trans = mem_alloc<unsigned int>(64, num_snp * PP_ones * 2);
for(i = 0; i < num_snp; i++)
for(j = 0; j < PP_ones; j++)
{
bin_data_ones_trans[(j * num_snp + i) * 2 + 0] = bin_data_ones[(i * PP_ones + j) * 2 + 0];
bin_data_ones_trans[(j * num_snp + i) * 2 + 1] = bin_data_ones[(i * PP_ones + j) * 2 + 1];
}
unsigned int* bin_data_zeros_trans = mem_alloc<unsigned int>(64, num_snp * PP_zeros * 2);
for(i = 0; i < num_snp; i++)
for(j = 0; j < PP_zeros; j++)
{
bin_data_zeros_trans[(j * num_snp + i) * 2 + 0] = bin_data_zeros[(i * PP_zeros + j) * 2 + 0];
bin_data_zeros_trans[(j * num_snp + i) * 2 + 1] = bin_data_zeros[(i * PP_zeros + j) * 2 + 1];
}
float *scores = mem_alloc<float>(64, num_snp * num_snp);
for(x = 0; x < num_snp * num_snp; x++) scores[x] = FLT_MAX;
auto start = myclock::now();
unsigned int* d_data_zeros;
cudaMalloc((void**)&d_data_zeros, num_snp*PP_zeros*2*sizeof(unsigned int));
cudaMemcpy(d_data_zeros, bin_data_zeros_trans,
num_snp*PP_zeros*2*sizeof(unsigned int), cudaMemcpyHostToDevice);
unsigned int* d_data_ones;
cudaMalloc((void**)&d_data_ones, num_snp*PP_ones*2*sizeof(unsigned int));
cudaMemcpy(d_data_ones, bin_data_ones_trans,
num_snp*PP_ones*2*sizeof(unsigned int), cudaMemcpyHostToDevice);
float* d_scores;
cudaMalloc((void**)&d_scores, num_snp*num_snp*sizeof(float));
// setup kernel ND-range
int num_snp_m = num_snp;
while(num_snp_m % block_snp != 0) num_snp_m++;
dim3 grid(num_snp_m / block_snp, num_snp_m);
dim3 block(block_snp, 1);
// epistasis detection kernel
for (int i = 0; i < iteration; i++) {
cudaMemcpy(d_scores, scores, sizeof(float) * num_snp * num_snp, cudaMemcpyHostToDevice);
epi<<<grid, block>>>(d_data_zeros, d_data_ones, d_scores, num_snp,
PP_zeros, PP_ones, mask_zeros, mask_ones);
}
cudaMemcpy(scores, d_scores, sizeof(float) * num_snp * num_snp, cudaMemcpyDeviceToHost);
auto end = myclock::now();
myduration elapsed = end - start;
std::cout << "Total offloading time: " << elapsed.count() << " sec" << std::endl;
// compute the minimum score on a host
float score = scores[0];
int solution = 0;
for (int i = 1; i < num_snp * num_snp; i++) {
if (score > scores[i]) {
score = scores[i];
solution = i;
}
}
std::cout << "Score: " << score << std::endl;
std::cout << "Solution: " << solution / num_snp << ", " << solution % num_snp << std::endl;
if ( (fabsf(score - 83.844f) > 1e-3f) || (solution / num_snp != 1253) ||
(solution % num_snp != 25752) )
std::cout << "FAIL\n";
else
std::cout << "PASS\n";
cudaFree(d_data_zeros);
cudaFree(d_data_ones);
cudaFree(d_scores);
mem_free(bin_data_zeros);
mem_free(bin_data_ones);
mem_free(bin_data_zeros_trans);
mem_free(bin_data_ones_trans);
mem_free(scores);
mem_free(SNP_Data);
mem_free(SNP_Data_trans);
mem_free(Ph_Data);
return 0;
}
|
7a0de33595d86a39db8e08dad86e3bd6fc4b6ebc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void ConstantB(bool * x, bool value, size_t idx, size_t N)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
{
x[(idx)*N + i] = value;
}
return;
} | 7a0de33595d86a39db8e08dad86e3bd6fc4b6ebc.cu | #include "includes.h"
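// Sets every element of row 'idx' in a row-major bool matrix with N columns to 'value', using a grid-stride loop.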
__global__ void ConstantB(bool * x, bool value, size_t idx, size_t N)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
{
x[(idx)*N + i] = value;
}
return;
} |
dff8514076ae639b1f83ed222cd8358cf1dd1d13.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ex1.h"
/*--------------------------------------------------------------------------------
AUX
--------------------------------------------------------------------------------*/
__device__ void calculate_histogram(uchar *given_image, int *result_array, int len) {
int thread_id = threadIdx.x;
/*amount of pixels that each thread does over the image. */
int work_per_thread = (IMG_WIDTH * IMG_HEIGHT) / blockDim.x;
/* initialize the histogram */
if (thread_id < 256) {
result_array[thread_id] = 0;
}
__syncthreads();
/* calculate the histogram */
int index;
for (int i=0 ; i < work_per_thread ; i++) {
index = blockDim.x * i + thread_id;
atomicAdd(&result_array[given_image[index]], 1);
}
__syncthreads();
}
__device__ void compute_map(int *cdf, uchar* map) {
int thread_id = threadIdx.x;
if (thread_id < 256) {
map[thread_id] = (256/N_COLORS) * floorf((N_COLORS *cdf[thread_id])/(IMG_HEIGHT * IMG_WIDTH));
}
__syncthreads();
}
__device__ void remap_image(uchar *in_image, uchar *out_image, uchar *map) {
int thread_id = threadIdx.x;
int work_per_thread = (IMG_WIDTH * IMG_HEIGHT) / blockDim.x;
/* remap the image */
int index;
for (int i=0 ; i<work_per_thread ; i++) {
index = blockDim.x * i + thread_id;
out_image[index] = map[in_image[index]];
}
__syncthreads();
}
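// Hillis-Steele style inclusive scan over arr, performed cooperatively by the whole thread block.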
__device__ void prefix_sum(int arr[], int arr_size) {
int thread_id = threadIdx.x;
int addition;
for (int stride = 1; stride < blockDim.x; stride <<= 1){
//for each thread computes the value, locally and wait until the rest finished.
if (thread_id >= stride && thread_id - stride < arr_size)
addition = arr[thread_id - stride];
//use barrier to wait
__syncthreads();
// write back to global memory once computation is done
if (thread_id >= stride && thread_id < arr_size)
arr[thread_id] += addition;
__syncthreads();
}
}
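/* Full per-image pipeline: 256-bin histogram -> in-place inclusive prefix sum
   (CDF) -> quantized tone-mapping LUT -> remapping of every pixel. */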
__device__ void process_image_wrapper(uchar *in, uchar *out) {
__shared__ int cdf[256];
__shared__ uchar m[256];
calculate_histogram(in, cdf, 256);
prefix_sum(cdf, 256);
compute_map(cdf, m);
remap_image(in, out, m);
}
__global__ void process_image_kernel(uchar *in, uchar *out) {
process_image_wrapper(in, out);
}
// process all images
__global__ void process_all_images_kernel(uchar *all_in, uchar *all_out) {
int block_id = blockIdx.x;
int offset = block_id * IMG_WIDTH * IMG_HEIGHT;
process_image_wrapper(all_in + offset, all_out + offset);
}
/* Task serial context struct with necessary CPU / GPU pointers to process a single image */
struct task_serial_context {
uchar* image_in;
uchar* image_out;
};
/* Allocate GPU memory for a single input image and a single output image.
*
* Returns: allocated and initialized task_serial_context. */
struct task_serial_context *task_serial_init()
{
//allocate memeory on the GPU global memory
auto context = new task_serial_context;
hipMalloc(&context->image_in, IMG_WIDTH * IMG_HEIGHT);
hipMalloc(&context->image_out, IMG_WIDTH * IMG_HEIGHT);
return context;
}
/* Process all the images in the given host array and return the output in the
* provided output host array */
void task_serial_process(struct task_serial_context *context, uchar *images_in, uchar *images_out){
//TODO: in a for loop:
// 1. copy the relevant image from images_in to the GPU memory you allocated
// 2. invoke GPU kernel on this image
// 3. copy output from GPU memory to relevant location in images_out_gpu_serial
for (int i=0 ; i < N_IMAGES ; i++) {
uchar *image_in = &images_in[i * IMG_WIDTH * IMG_HEIGHT];
uchar *image_out = &images_out[i * IMG_WIDTH * IMG_HEIGHT];
/* copy the relevant image from images_in to the GPU memory allocated */
hipMemcpy(context->image_in, image_in, IMG_WIDTH * IMG_HEIGHT,hipMemcpyHostToDevice);
int blocks = 1;
int threads_in_block = 1024;
/* invoke the GPU kernel */
hipLaunchKernelGGL(( process_image_kernel), dim3(blocks), dim3(threads_in_block), 0, 0, context->image_in, context->image_out);
/* copy output from GPU memory to relevant location in images_out_gpu_serial */
hipMemcpy(image_out, context->image_out , IMG_WIDTH * IMG_HEIGHT,hipMemcpyDeviceToHost);
}
}
/* Release allocated resources for the task-serial implementation. */
void task_serial_free(struct task_serial_context *context)
{
hipFree(context->image_in);
hipFree(context->image_out);
delete context;
}
/* Bulk GPU context struct with necessary CPU / GPU pointers to process all the images */
struct gpu_bulk_context {
uchar* all_in;
uchar* all_out;
};
/* Allocate GPU memory for all the input and output images.
*
* Returns: allocated and initialized gpu_bulk_context. */
struct gpu_bulk_context *gpu_bulk_init()
{
auto context = new gpu_bulk_context;
hipMalloc(&context->all_in, IMG_WIDTH * IMG_HEIGHT * N_IMAGES);
hipMalloc(&context->all_out, IMG_WIDTH * IMG_HEIGHT * N_IMAGES);
return context;
}
/* Process all the images in the given host array and return the output in the
* provided output host array */
void gpu_bulk_process(struct gpu_bulk_context *context, uchar *images_in, uchar *images_out)
{
//TODO: copy all input images from images_in to the GPU memory you allocated
//TODO: invoke a kernel with N_IMAGES threadblocks, each working on a different image
//TODO: copy output images from GPU memory to images_out
/* copy the relevant images from images_in to the GPU memory allocated */
hipMemcpy(context->all_in, images_in, IMG_WIDTH * IMG_HEIGHT * N_IMAGES ,hipMemcpyHostToDevice);
int blocks = N_IMAGES;
int threads_in_block = 1024;
/* invoke the GPU kernel */
hipLaunchKernelGGL(( process_all_images_kernel), dim3(blocks), dim3(threads_in_block), 0, 0, context->all_in, context->all_out);
/* copy output images from GPU memory back to images_out */
hipMemcpy(images_out, context->all_out , IMG_WIDTH * IMG_HEIGHT * N_IMAGES,hipMemcpyDeviceToHost);
}
/* Release allocated resources for the bulk GPU implementation. */
void gpu_bulk_free(struct gpu_bulk_context *context)
{
hipFree(context->all_in);
hipFree(context->all_out);
delete context; // allocated with new, so it must be released with delete
}
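/* A minimal host-side driver sketch (illustrative only, assuming ex1.h defines
uchar, IMG_WIDTH, IMG_HEIGHT and N_IMAGES; load_images() is a hypothetical helper
that fills the input buffer):
uchar *images_in = (uchar *) malloc(N_IMAGES * IMG_WIDTH * IMG_HEIGHT);
uchar *images_out = (uchar *) malloc(N_IMAGES * IMG_WIDTH * IMG_HEIGHT);
load_images(images_in);
struct gpu_bulk_context *ctx = gpu_bulk_init();
gpu_bulk_process(ctx, images_in, images_out);
gpu_bulk_free(ctx);
free(images_in);
free(images_out);
*/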
| dff8514076ae639b1f83ed222cd8358cf1dd1d13.cu | #include "ex1.h"
/*--------------------------------------------------------------------------------
AUX
--------------------------------------------------------------------------------*/
__device__ void calculate_histogram(uchar *given_image, int *result_array, int len) {
int thread_id = threadIdx.x;
/*amount of pixels that each thread does over the image. */
int work_per_thread = (IMG_WIDTH * IMG_HEIGHT) / blockDim.x;
/* initialize the histogram */
if (thread_id < 256) {
result_array[thread_id] = 0;
}
__syncthreads();
/* calculate the histogram */
int index;
for (int i=0 ; i < work_per_thread ; i++) {
index = blockDim.x * i + thread_id;
atomicAdd(&result_array[given_image[index]], 1);
}
__syncthreads();
}
__device__ void compute_map(int *cdf, uchar* map) {
int thread_id = threadIdx.x;
if (thread_id < 256) {
map[thread_id] = (256/N_COLORS) * floorf((N_COLORS *cdf[thread_id])/(IMG_HEIGHT * IMG_WIDTH));
}
__syncthreads();
}
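/* Sketch of the intent of the map above: cdf[v] is the number of pixels with value <= v,
so N_COLORS * cdf[v] / (IMG_HEIGHT * IMG_WIDTH) selects one of N_COLORS buckets and the
(256 / N_COLORS) factor spreads those buckets back over the 0..255 range, i.e. histogram
equalization quantized to N_COLORS gray levels. */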
__device__ void remap_image(uchar *in_image, uchar *out_image, uchar *map) {
int thread_id = threadIdx.x;
int work_per_thread = (IMG_WIDTH * IMG_HEIGHT) / blockDim.x;
/* remap the image */
int index;
for (int i=0 ; i<work_per_thread ; i++) {
index = blockDim.x * i + thread_id;
out_image[index] = map[in_image[index]];
}
__syncthreads();
}
__device__ void prefix_sum(int arr[], int arr_size) {
int thread_id = threadIdx.x;
int addition;
for (int stride = 1; stride < blockDim.x; stride <<= 1){
//each thread computes its new value locally, then waits until the rest have finished reading
if (thread_id >= stride && thread_id - stride < arr_size)
addition = arr[thread_id - stride];
//use barrier to wait
__syncthreads();
// write the updated value back once all reads for this stride are done
if (thread_id >= stride && thread_id < arr_size)
arr[thread_id] += addition;
__syncthreads();
}
}
__device__ void process_image_wrapper(uchar *in, uchar *out) {
__shared__ int cdf[256];
__shared__ uchar m[256];
calculate_histogram(in, cdf, 256);
prefix_sum(cdf, 256);
compute_map(cdf, m);
remap_image(in, out, m);
}
__global__ void process_image_kernel(uchar *in, uchar *out) {
process_image_wrapper(in, out);
}
// process all images
__global__ void process_all_images_kernel(uchar *all_in, uchar *all_out) {
int block_id = blockIdx.x;
int offset = block_id * IMG_WIDTH * IMG_HEIGHT;
process_image_wrapper(all_in + offset, all_out + offset);
}
/* Task serial context struct with necessary CPU / GPU pointers to process a single image */
struct task_serial_context {
uchar* image_in;
uchar* image_out;
};
/* Allocate GPU memory for a single input image and a single output image.
*
* Returns: allocated and initialized task_serial_context. */
struct task_serial_context *task_serial_init()
{
//allocate memory in GPU global memory
auto context = new task_serial_context;
cudaMalloc(&context->image_in, IMG_WIDTH * IMG_HEIGHT);
cudaMalloc(&context->image_out, IMG_WIDTH * IMG_HEIGHT);
return context;
}
/* Process all the images in the given host array and return the output in the
* provided output host array */
void task_serial_process(struct task_serial_context *context, uchar *images_in, uchar *images_out){
//TODO: in a for loop:
// 1. copy the relevant image from images_in to the GPU memory you allocated
// 2. invoke GPU kernel on this image
// 3. copy output from GPU memory to relevant location in images_out_gpu_serial
for (int i=0 ; i < N_IMAGES ; i++) {
uchar *image_in = &images_in[i * IMG_WIDTH * IMG_HEIGHT];
uchar *image_out = &images_out[i * IMG_WIDTH * IMG_HEIGHT];
/* copy the relevant image from images_in to the GPU memory allocated */
cudaMemcpy(context->image_in, image_in, IMG_WIDTH * IMG_HEIGHT,cudaMemcpyHostToDevice);
int blocks = 1;
int threads_in_block = 1024;
/* invoke the GPU kernel */
process_image_kernel<<<blocks, threads_in_block>>>(context->image_in, context->image_out);
/* copy output from GPU memory to relevant location in images_out_gpu_serial */
cudaMemcpy(image_out, context->image_out , IMG_WIDTH * IMG_HEIGHT,cudaMemcpyDeviceToHost);
}
}
/* Release allocated resources for the task-serial implementation. */
void task_serial_free(struct task_serial_context *context)
{
cudaFree(context->image_in);
cudaFree(context->image_out);
delete context; // allocated with new, so it must be released with delete
}
/* Bulk GPU context struct with necessary CPU / GPU pointers to process all the images */
struct gpu_bulk_context {
uchar* all_in;
uchar* all_out;
};
/* Allocate GPU memory for all the input and output images.
*
* Returns: allocated and initialized gpu_bulk_context. */
struct gpu_bulk_context *gpu_bulk_init()
{
auto context = new gpu_bulk_context;
cudaMalloc(&context->all_in, IMG_WIDTH * IMG_HEIGHT * N_IMAGES);
cudaMalloc(&context->all_out, IMG_WIDTH * IMG_HEIGHT * N_IMAGES);
return context;
}
/* Process all the images in the given host array and return the output in the
* provided output host array */
void gpu_bulk_process(struct gpu_bulk_context *context, uchar *images_in, uchar *images_out)
{
//TODO: copy all input images from images_in to the GPU memory you allocated
//TODO: invoke a kernel with N_IMAGES threadblocks, each working on a different image
//TODO: copy output images from GPU memory to images_out
/* copy the relevant images from images_in to the GPU memory allocated */
cudaMemcpy(context->all_in, images_in, IMG_WIDTH * IMG_HEIGHT * N_IMAGES ,cudaMemcpyHostToDevice);
int blocks = N_IMAGES;
int threads_in_block = 1024;
/* invoke the GPU kernel */
process_all_images_kernel<<<blocks, threads_in_block>>>(context->all_in, context->all_out);
/* copy output images from GPU memory back to images_out */
cudaMemcpy(images_out, context->all_out , IMG_WIDTH * IMG_HEIGHT * N_IMAGES,cudaMemcpyDeviceToHost);
}
/* Release allocated resources for the bulk GPU implementation. */
void gpu_bulk_free(struct gpu_bulk_context *context)
{
cudaFree(context->all_in);
cudaFree(context->all_out);
delete context; // allocated with new, so it must be released with delete
}
|
5996d8c4975532d953446050fa7e587b52a279a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "KnnAlgorithm.h"
static void HandleError( hipError_t err, const char *file, int line ) {
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
__global__ void cuda_knn_predict(float *data, int train_rows, int test_rows, int columns, int * accurate_predictions) {
int total_threads_count = blockDim.x * gridDim.x;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int closest_neighbour_index;
float max_float = FLT_MAX;
float* train_data = data;
float* test_data = data + (columns * train_rows);
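/* Data layout assumed by this kernel: rows are stored contiguously, column 0 of each row
holds the class label and columns 1..columns-1 hold the features; the first train_rows
rows are the training split, followed by the test rows. */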
for (int current_test_row=tid; current_test_row < test_rows; current_test_row=current_test_row+total_threads_count) {
float closest_neighbour_distance = max_float;
float* tst = test_data + (columns * current_test_row);
// for each row in train dataset
for (int i = 0; i < train_rows; ++i) {
float* tr = train_data + (i * columns) + 1;
// compute the squared Euclidean distance (sqrt is omitted since it does not change the nearest neighbour) and keep the closest one
float sum = 0;
for (int j = 1; j < columns; ++j, ++tr) {
float difference = *(tr) - *(tst +j);
sum = sum + (difference * difference);
}
// distance is euclidean metric for current_test_row and i-th train data
// if this training row is closer, update closest_neighbour_distance and closest_neighbour_index
if(sum < closest_neighbour_distance) {
closest_neighbour_distance = sum;
closest_neighbour_index = i;
}
}
// now we have found closest neighbour and have index of that neighbour in closest_neighbour_index variable
// so let's get target class of that neighbour (predicted class) and check if the prediction is accurate
if(*(test_data + (current_test_row * columns)) == *(train_data + (closest_neighbour_index * columns))) {
// if prediction is accurate increment accurate predictions counter
atomicAdd(accurate_predictions, 1);
}
}
}
KnnAlgorithm::KnnAlgorithm() {
//ctor
}
KnnAlgorithm::~KnnAlgorithm() {
//dtor
}
void KnnAlgorithm::fit(Data * data, int percent) {
this->train_rows = (data->rows * percent) / 100;
this->columns = data->columns;
this->test_rows = data->rows - train_rows;
this->train_data = data->data;
this->test_data = data->data + (columns * train_rows);
}
float KnnAlgorithm::predict(int threads_count_per_block, int blocks_count ) {
int accurate_predictions;
int * cuda_accurate_predictions;
hipDeviceProp_t cuda_properties; // information about gpu
HANDLE_ERROR(hipGetDeviceProperties( &cuda_properties, 0));
// copy data to compute into gpu device memory
float *cuda_data;
int data_size = sizeof(float) * (this->test_rows + this->train_rows) * this->columns;
HANDLE_ERROR(hipMalloc((void**)&cuda_data, data_size));
HANDLE_ERROR(hipMemcpy(cuda_data, this->train_data, data_size, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMalloc((void**)&cuda_accurate_predictions, sizeof(int)));
HANDLE_ERROR(hipMemset(cuda_accurate_predictions, 0, sizeof(int))); // the counter must start at zero before the kernel's atomicAdd
// measure time using cuda events
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
// perform knn prediction
hipLaunchKernelGGL(( cuda_knn_predict), dim3(blocks_count), dim3(threads_count_per_block), 0, 0, cuda_data, this->train_rows, this->test_rows, this->columns, cuda_accurate_predictions);
hipEventRecord(stop);
// print elapsed time
hipEventSynchronize(stop);
float elapsed_time = 0;
hipEventElapsedTime(&elapsed_time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("Czas obliczen knn: %f\n", elapsed_time/1000);
// copy from gpu device memory to host RAM
HANDLE_ERROR(hipMemcpy(&accurate_predictions, cuda_accurate_predictions, sizeof(int),
hipMemcpyDeviceToHost));
// release device buffers allocated above
HANDLE_ERROR(hipFree(cuda_data));
HANDLE_ERROR(hipFree(cuda_accurate_predictions));
return (accurate_predictions / float(test_rows)) * 100;
}
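/* Usage sketch (illustrative values; assumes a Data object exposing the rows, columns and
data fields used by fit()):
KnnAlgorithm knn;
knn.fit(&dataset, 80); // first 80% of the rows become the training split
float accuracy = knn.predict(256, 64); // 256 threads per block, 64 blocks
printf("accuracy: %.2f%%\n", accuracy);
*/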
| 5996d8c4975532d953446050fa7e587b52a279a5.cu | #include "KnnAlgorithm.h"
static void HandleError( cudaError_t err, const char *file, int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
__global__ void cuda_knn_predict(float *data, int train_rows, int test_rows, int columns, int * accurate_predictions) {
int total_threads_count = blockDim.x * gridDim.x;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int closest_neighbour_index;
float max_float = FLT_MAX;
float* train_data = data;
float* test_data = data + (columns * train_rows);
for (int current_test_row=tid; current_test_row < test_rows; current_test_row=current_test_row+total_threads_count) {
float closest_neighbour_distance = max_float;
float* tst = test_data + (columns * current_test_row);
// for each row in train dataset
for (int i = 0; i < train_rows; ++i) {
float* tr = train_data + (i * columns) + 1;
// calculate eucidlean metric and get the closest one
float sum = 0;
for (int j = 1; j < columns; ++j, ++tr) {
float difference = *(tr) - *(tst +j);
sum = sum + (difference * difference);
}
// distance is euclidean metric for current_test_row and i-th train data
// if our data is closer to that row from train data update closest_neighbour_distance and and closest_neighbour_index
if(sum < closest_neighbour_distance) {
closest_neighbour_distance = sum;
closest_neighbour_index = i;
}
}
// now we have found closest neighbour and have index of that neighbour in closest_neighbour_index variable
// so let's get target class of that neighbour (predicted class) and check if the prediction is accurate
if(*(test_data + (current_test_row * columns)) == *(train_data + (closest_neighbour_index * columns))) {
// if prediction is accurate increment accurate predictions counter
atomicAdd(accurate_predictions, 1);
}
}
}
KnnAlgorithm::KnnAlgorithm() {
//ctor
}
KnnAlgorithm::~KnnAlgorithm() {
//dtor
}
void KnnAlgorithm::fit(Data * data, int percent) {
this->train_rows = (data->rows * percent) / 100;
this->columns = data->columns;
this->test_rows = data->rows - train_rows;
this->train_data = data->data;
this->test_data = data->data + (columns * train_rows);
}
float KnnAlgorithm::predict(int threads_count_per_block, int blocks_count ) {
int accurate_predictions;
int * cuda_accurate_predictions;
cudaDeviceProp cuda_properties; // information about gpu
HANDLE_ERROR(cudaGetDeviceProperties( &cuda_properties, 0));
// copy data to compute into gpu device memory
float *cuda_data;
int data_size = sizeof(float) * (this->test_rows + this->train_rows) * this->columns;
HANDLE_ERROR(cudaMalloc((void**)&cuda_data, data_size));
HANDLE_ERROR(cudaMemcpy(cuda_data, this->train_data, data_size, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMalloc((void**)&cuda_accurate_predictions, sizeof(int)));
HANDLE_ERROR(cudaMemset(cuda_accurate_predictions, 0, sizeof(int))); // the counter must start at zero before the kernel's atomicAdd
// measure time using cuda events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
// perform knn prediction
cuda_knn_predict<<<blocks_count, threads_count_per_block>>>(cuda_data, this->train_rows, this->test_rows, this->columns, cuda_accurate_predictions);
cudaEventRecord(stop);
// print elapsed time
cudaEventSynchronize(stop);
float elapsed_time = 0;
cudaEventElapsedTime(&elapsed_time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("Czas obliczen knn: %f\n", elapsed_time/1000);
// copy from gpu device memory to host RAM
HANDLE_ERROR(cudaMemcpy(&accurate_predictions, cuda_accurate_predictions, sizeof(int),
cudaMemcpyDeviceToHost));
// release device buffers allocated above
HANDLE_ERROR(cudaFree(cuda_data));
HANDLE_ERROR(cudaFree(cuda_accurate_predictions));
return (accurate_predictions / float(test_rows)) * 100;
}
|
e013ca7dd00edde850d1b87759158cdf556beb08.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
#include <NeoMathEngine/NeoMathEngineDefs.h>
#ifdef NEOML_USE_CUDA
#include <DllLoader.h>
#include <CudaMathEngine.h>
#include <CudaCommon.h>
#include <CublasFunctions.h>
#include <CusparseFunctions.h>
#include <CudaDevice.h>
#include <CudaAssert.h>
#include <MathEngineCommon.h>
#include <MemoryHandleInternal.h>
#include <MathEngineDeviceStackAllocator.h>
#include <MathEngineHostStackAllocator.h>
#include <math.h>
#include <float.h>
#include <hip/hip_runtime.h>
namespace NeoML {
static __constant__ const float ZeroDev = 0;
static __constant__ const float OneDev = 1;
const int CudaMemoryAlignment = 4;
//------------------------------------------------------------------------------------------------------------
CCudaMathEngine::CCudaMathEngine( const CCusparse* _cusparse, const CCublas* _cublas, std::unique_ptr<CCudaDevice>& _device, int flags ) :
cusparse( _cusparse ),
cublas( _cublas ),
cublasHandle( 0 ),
cusparseHandle( 0 )
{
device.swap( _device );
// CUDA
ASSERT_EXPR( device != 0 );
SetCudaDevice( device->DeviceNumber );
// Cublas.
ASSERT_CUBLAS( cublas->Create( &cublasHandle ) );
cublasMath_t cublasMath = ( flags & GpuMathEngineCublasUseTensorCoresFlag ) == 0 ? CUBLAS_DEFAULT_MATH : CUBLAS_TENSOR_OP_MATH;
ASSERT_CUBLAS( cublas->SetMathMode( cublasHandle, cublasMath ) );
ASSERT_CUBLAS( cublas->SetAtomicsMode( cublasHandle, HIPBLAS_ATOMICS_ALLOWED ) );
ASSERT_CUBLAS( cublas->SetPointerMode( cublasHandle, HIPBLAS_POINTER_MODE_DEVICE ) );
// Cusparse.
ASSERT_CUSPARSE( cusparse->Create( &cusparseHandle ) );
// Constants
ASSERT_CUDA( hipGetSymbolAddress((void**)&cudaConstZero, ZeroDev) );
ASSERT_CUDA( hipGetSymbolAddress((void**)&cudaConstOne, OneDev) );
memoryPool = std::unique_ptr<CMemoryPool>( new CMemoryPool( device->MemoryLimit, this, true ) );
deviceStackRunTime = std::unique_ptr<CDeviceStackAllocator>( new CDeviceStackAllocator( *memoryPool, CudaMemoryAlignment ) );
hostStackRunTime = std::unique_ptr<CHostStackAllocator>( new CHostStackAllocator( CudaMemoryAlignment ) );
CDllLoader::Load(CDllLoader::CUDA_DLL);
}
CCudaMathEngine::~CCudaMathEngine()
{
hostStackRunTime.reset();
deviceStackRunTime.reset();
memoryPool.reset();
cusparse->Destroy( cusparseHandle );
cublas->Destroy( cublasHandle );
CDllLoader::Free(CDllLoader::CUDA_DLL);
}
///////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////
static inline void CudaFixGeom(int& minVal, int maxVal, unsigned int& geom)
{
if(minVal > maxVal) {
minVal = maxVal;
}
if(minVal > (int)geom) {
minVal = (int)geom;
}
if((int)geom > maxVal) {
geom = maxVal;
}
}
// The largest 2^N number smaller than this one (returns 1 for input 1)
static inline int GetMax2ExpLess(int value)
{
const int startExp = 16;
int expStep = startExp >> 1;
int candidate = 1 << startExp;
while(expStep > 0) {
if(candidate >= value) {
candidate >>= expStep;
} else {
candidate <<= expStep;
}
expStep >>= 1;
}
if(candidate >= value) {
candidate >>= 1;
}
return candidate;
}
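// e.g. GetMax2ExpLess(1000) == 512, GetMax2ExpLess(512) == 256, GetMax2ExpLess(1) == 1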
static inline void CudaFixMinVals(int& minZ, int& minY, int& minX, int maxThreadCount)
{
int nextMin = 0;
while(minX * minY * minZ > maxThreadCount) {
int candidate = nextMin++ % 3;
switch(candidate) {
case 0:
minZ = GetMax2ExpLess(minZ);
break;
case 1:
minY = GetMax2ExpLess(minY);
break;
case 2:
minX = GetMax2ExpLess(minX);
break;
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
int CCudaMathEngine::alignXSizeForWarp(int xSize)
{
// Align the size so that it is either a multiple of the warp size or, when smaller, the nearest power of two not less than it
// Required for reduction with warps
int candidate = device->WarpSize;
if( xSize >= candidate ) {
return ( ( xSize + candidate - 1 ) / candidate ) * candidate;
}
int next = candidate;
do {
candidate = next;
next = next >> 1;
} while(xSize <= next);
return candidate;
}
void CCudaMathEngine::getCudaTaskGrid(int& blockCount, int& threadCount, int taskCount, int combineCount)
{
ASSERT_EXPR( taskCount > 0 );
ASSERT_EXPR( combineCount > 0 );
int runCount = (taskCount + combineCount - 1) / combineCount;
threadCount = device->ThreadMaxCount;
if(threadCount > runCount) {
threadCount = runCount;
}
blockCount = (runCount + threadCount - 1) / threadCount;
}
void CCudaMathEngine::getCudaTaskGrid2D(dim3& blockCount, dim3& threadCount,
int height, int width, int maxThreadCount)
{
getCudaTaskGrid3DMinZYX(1, 1, 1, blockCount, threadCount, 1, height, width, maxThreadCount);
}
void CCudaMathEngine::getCudaTaskGrid3D(dim3& blockCount, dim3& threadCount,
int batchSize, int height, int width, int maxThreadCount)
{
getCudaTaskGrid3DMinZYX(1, 1, 1, blockCount, threadCount, batchSize, height, width, maxThreadCount);
}
void CCudaMathEngine::getCudaTaskGrid2DMinYX(int minY, int minX, dim3& blockCount, dim3& threadCount,
int height, int width, int maxThreadCount)
{
getCudaTaskGrid3DMinZYX(1, minY, minX, blockCount, threadCount, 1, height, width, maxThreadCount);
}
void CCudaMathEngine::getCudaTaskGrid3DMinZYX(int minZ, int minY, int minX, dim3& blockCount, dim3& threadCount,
int batchSize, int height, int width, int _maxThreadCount)
{
int maxThreadCount = min( device->ThreadMaxCount, static_cast<unsigned int>( _maxThreadCount ) );
ASSERT_EXPR(maxThreadCount >= 1);
ASSERT_EXPR(minZ > 0 && minY > 0 && minX > 0);
ASSERT_EXPR(batchSize > 0 && height > 0 && width > 0);
dim3 geom( device->ThreadMax3DCountX, device->ThreadMax3DCountY, device->ThreadMax3DCountZ );
CudaFixGeom(minX, width, geom.x);
CudaFixGeom(minY, height, geom.y);
CudaFixGeom(minZ, batchSize, geom.z);
CudaFixMinVals(minX, minY, minZ, maxThreadCount);
unsigned int optimalGridSize = INT_MAX;
threadCount = dim3(1, 1, 1);
blockCount = dim3(width, height, batchSize);
dim3 currentGeom;
unsigned int zLimit = min(geom.z * 2, maxThreadCount + 1);
for(currentGeom.z = minZ; currentGeom.z < zLimit; currentGeom.z *= 2) {
unsigned int zBlock = min(currentGeom.z, geom.z);
unsigned int zBlockCount = (batchSize + zBlock - 1) / zBlock;
unsigned int xyMaxThreadCount = maxThreadCount / currentGeom.z;
unsigned int yLimit = min(geom.y * 2, xyMaxThreadCount + 1);
for(currentGeom.y = minY; currentGeom.y < yLimit; currentGeom.y *= 2) {
currentGeom.x = xyMaxThreadCount / currentGeom.y;
if((int)currentGeom.x < minX) {
continue;
}
unsigned int yBlock = min(currentGeom.y, geom.y);
unsigned int yBlockCount = (height + yBlock - 1) / yBlock;
unsigned int xBlock = min(currentGeom.x, geom.x);
unsigned int xBlockCount = (width + xBlock - 1) / xBlock;
unsigned int gridSize = xBlockCount * yBlockCount * zBlockCount;
if(gridSize < optimalGridSize) {
optimalGridSize = gridSize;
threadCount = dim3(xBlock, yBlock, zBlock);
blockCount = dim3(xBlockCount, yBlockCount, zBlockCount);
}
}
}
}
} // namespace NeoML
#endif // NEOML_USE_CUDA
| e013ca7dd00edde850d1b87759158cdf556beb08.cu | /* Copyright © 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
#include <NeoMathEngine/NeoMathEngineDefs.h>
#ifdef NEOML_USE_CUDA
#include <DllLoader.h>
#include <CudaMathEngine.h>
#include <CudaCommon.h>
#include <CublasFunctions.h>
#include <CusparseFunctions.h>
#include <CudaDevice.h>
#include <CudaAssert.h>
#include <MathEngineCommon.h>
#include <MemoryHandleInternal.h>
#include <MathEngineDeviceStackAllocator.h>
#include <MathEngineHostStackAllocator.h>
#include <math.h>
#include <float.h>
#include <cuda_runtime.h>
namespace NeoML {
static __constant__ const float ZeroDev = 0;
static __constant__ const float OneDev = 1;
const int CudaMemoryAlignment = 4;
//------------------------------------------------------------------------------------------------------------
CCudaMathEngine::CCudaMathEngine( const CCusparse* _cusparse, const CCublas* _cublas, std::unique_ptr<CCudaDevice>& _device, int flags ) :
cusparse( _cusparse ),
cublas( _cublas ),
cublasHandle( 0 ),
cusparseHandle( 0 )
{
device.swap( _device );
// CUDA
ASSERT_EXPR( device != 0 );
SetCudaDevice( device->DeviceNumber );
// Cublas.
ASSERT_CUBLAS( cublas->Create( &cublasHandle ) );
cublasMath_t cublasMath = ( flags & GpuMathEngineCublasUseTensorCoresFlag ) == 0 ? CUBLAS_DEFAULT_MATH : CUBLAS_TENSOR_OP_MATH;
ASSERT_CUBLAS( cublas->SetMathMode( cublasHandle, cublasMath ) );
ASSERT_CUBLAS( cublas->SetAtomicsMode( cublasHandle, CUBLAS_ATOMICS_ALLOWED ) );
ASSERT_CUBLAS( cublas->SetPointerMode( cublasHandle, CUBLAS_POINTER_MODE_DEVICE ) );
// Cusparse.
ASSERT_CUSPARSE( cusparse->Create( &cusparseHandle ) );
// Constants
ASSERT_CUDA( cudaGetSymbolAddress((void**)&cudaConstZero, ZeroDev) );
ASSERT_CUDA( cudaGetSymbolAddress((void**)&cudaConstOne, OneDev) );
memoryPool = std::unique_ptr<CMemoryPool>( new CMemoryPool( device->MemoryLimit, this, true ) );
deviceStackRunTime = std::unique_ptr<CDeviceStackAllocator>( new CDeviceStackAllocator( *memoryPool, CudaMemoryAlignment ) );
hostStackRunTime = std::unique_ptr<CHostStackAllocator>( new CHostStackAllocator( CudaMemoryAlignment ) );
CDllLoader::Load(CDllLoader::CUDA_DLL);
}
CCudaMathEngine::~CCudaMathEngine()
{
hostStackRunTime.reset();
deviceStackRunTime.reset();
memoryPool.reset();
cusparse->Destroy( cusparseHandle );
cublas->Destroy( cublasHandle );
CDllLoader::Free(CDllLoader::CUDA_DLL);
}
///////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////
static inline void CudaFixGeom(int& minVal, int maxVal, unsigned int& geom)
{
if(minVal > maxVal) {
minVal = maxVal;
}
if(minVal > (int)geom) {
minVal = (int)geom;
}
if((int)geom > maxVal) {
geom = maxVal;
}
}
// The largest 2^N number smaller than this one (returns 1 for input 1)
static inline int GetMax2ExpLess(int value)
{
const int startExp = 16;
int expStep = startExp >> 1;
int candidate = 1 << startExp;
while(expStep > 0) {
if(candidate >= value) {
candidate >>= expStep;
} else {
candidate <<= expStep;
}
expStep >>= 1;
}
if(candidate >= value) {
candidate >>= 1;
}
return candidate;
}
static inline void CudaFixMinVals(int& minZ, int& minY, int& minX, int maxThreadCount)
{
int nextMin = 0;
while(minX * minY * minZ > maxThreadCount) {
int candidate = nextMin++ % 3;
switch(candidate) {
case 0:
minZ = GetMax2ExpLess(minZ);
break;
case 1:
minY = GetMax2ExpLess(minY);
break;
case 2:
minX = GetMax2ExpLess(minX);
break;
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
int CCudaMathEngine::alignXSizeForWarp(int xSize)
{
// Align the size so that it is either a multiple of the warp size or, when smaller, the nearest power of two not less than it
// Required for reduction with warps
int candidate = device->WarpSize;
if( xSize >= candidate ) {
return ( ( xSize + candidate - 1 ) / candidate ) * candidate;
}
int next = candidate;
do {
candidate = next;
next = next >> 1;
} while(xSize <= next);
return candidate;
}
void CCudaMathEngine::getCudaTaskGrid(int& blockCount, int& threadCount, int taskCount, int combineCount)
{
ASSERT_EXPR( taskCount > 0 );
ASSERT_EXPR( combineCount > 0 );
int runCount = (taskCount + combineCount - 1) / combineCount;
threadCount = device->ThreadMaxCount;
if(threadCount > runCount) {
threadCount = runCount;
}
blockCount = (runCount + threadCount - 1) / threadCount;
}
void CCudaMathEngine::getCudaTaskGrid2D(dim3& blockCount, dim3& threadCount,
int height, int width, int maxThreadCount)
{
getCudaTaskGrid3DMinZYX(1, 1, 1, blockCount, threadCount, 1, height, width, maxThreadCount);
}
void CCudaMathEngine::getCudaTaskGrid3D(dim3& blockCount, dim3& threadCount,
int batchSize, int height, int width, int maxThreadCount)
{
getCudaTaskGrid3DMinZYX(1, 1, 1, blockCount, threadCount, batchSize, height, width, maxThreadCount);
}
void CCudaMathEngine::getCudaTaskGrid2DMinYX(int minY, int minX, dim3& blockCount, dim3& threadCount,
int height, int width, int maxThreadCount)
{
getCudaTaskGrid3DMinZYX(1, minY, minX, blockCount, threadCount, 1, height, width, maxThreadCount);
}
void CCudaMathEngine::getCudaTaskGrid3DMinZYX(int minZ, int minY, int minX, dim3& blockCount, dim3& threadCount,
int batchSize, int height, int width, int _maxThreadCount)
{
int maxThreadCount = min( device->ThreadMaxCount, static_cast<unsigned int>( _maxThreadCount ) );
ASSERT_EXPR(maxThreadCount >= 1);
ASSERT_EXPR(minZ > 0 && minY > 0 && minX > 0);
ASSERT_EXPR(batchSize > 0 && height > 0 && width > 0);
dim3 geom( device->ThreadMax3DCountX, device->ThreadMax3DCountY, device->ThreadMax3DCountZ );
CudaFixGeom(minX, width, geom.x);
CudaFixGeom(minY, height, geom.y);
CudaFixGeom(minZ, batchSize, geom.z);
CudaFixMinVals(minX, minY, minZ, maxThreadCount);
unsigned int optimalGridSize = INT_MAX;
threadCount = dim3(1, 1, 1);
blockCount = dim3(width, height, batchSize);
dim3 currentGeom;
unsigned int zLimit = min(geom.z * 2, maxThreadCount + 1);
for(currentGeom.z = minZ; currentGeom.z < zLimit; currentGeom.z *= 2) {
unsigned int zBlock = min(currentGeom.z, geom.z);
unsigned int zBlockCount = (batchSize + zBlock - 1) / zBlock;
unsigned int xyMaxThreadCount = maxThreadCount / currentGeom.z;
unsigned int yLimit = min(geom.y * 2, xyMaxThreadCount + 1);
for(currentGeom.y = minY; currentGeom.y < yLimit; currentGeom.y *= 2) {
currentGeom.x = xyMaxThreadCount / currentGeom.y;
if((int)currentGeom.x < minX) {
continue;
}
unsigned int yBlock = min(currentGeom.y, geom.y);
unsigned int yBlockCount = (height + yBlock - 1) / yBlock;
unsigned int xBlock = min(currentGeom.x, geom.x);
unsigned int xBlockCount = (width + xBlock - 1) / xBlock;
unsigned int gridSize = xBlockCount * yBlockCount * zBlockCount;
if(gridSize < optimalGridSize) {
optimalGridSize = gridSize;
threadCount = dim3(xBlock, yBlock, zBlock);
blockCount = dim3(xBlockCount, yBlockCount, zBlockCount);
}
}
}
}
} // namespace NeoML
#endif // NEOML_USE_CUDA
|
af888744452ef1764c15bc8d08f029f7fdf70286.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//fir filtering via fft with cuda
//includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
// includes, project
#include <hipfft.h>
#include <cutil.h>
// Complex data type
typedef float2 Complex; // interleaved re/im, layout-compatible with hipfftComplex (a function cannot return an array type)
typedef float Real;
static __device__ __host__ inline Complex ComplexAdd(Complex, Complex);
static __device__ __host__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
static __global__ void ComplexPointwiseMulAndScale(Complex*, const Complex*, int, float);
// Filtering functions
void fftFilter(float* hSignal, float* hKernel, int n)
{// hSignal is filtered in place: on return it holds the circular convolution of the original signal with hKernel; both arrays have n elements.
int memSizeReal = sizeof(Real) * n;
int memSizeComplex = sizeof(Complex) * (n/2+1);
// Allocate device memory for signal
Real* dSignal;
CUDA_SAFE_CALL(hipMalloc((void**)&dSignal, memSizeReal));
// Copy host memory to device
CUDA_SAFE_CALL(hipMemcpy(dSignal, hSignal,memSizeReal,
hipMemcpyHostToDevice));
// Allocate device memory for filter kernel signal, and transforms
Real* dKernel;
CUDA_SAFE_CALL(hipMalloc((void**)&dKernel, memSizeReal));
// Copy host memory to device
CUDA_SAFE_CALL(hipMemcpy(dKernel, hKernel, memSizeReal,
hipMemcpyHostToDevice));
//allocate device memory for transforms
Complex* dKernelTransform;
Complex* dSignalTransform;
CUDA_SAFE_CALL(hipMalloc((void**)&dKernelTransform, memSizeComplex));
CUDA_SAFE_CALL(hipMalloc((void**)&dSignalTransform, memSizeComplex));
// CUFFT plan
hipfftHandle fPlan;//forward plan
hipfftHandle rPlan;//reverse plan
CUFFT_SAFE_CALL(hipfftPlan1d(&fPlan, n, HIPFFT_R2C, 1));
CUFFT_SAFE_CALL(hipfftPlan1d(&rPlan, n, HIPFFT_C2R, 1));
// Transform signal and kernel
CUFFT_SAFE_CALL(hipfftExecR2C(fPlan, (hipfftReal *)dSignal, (hipfftComplex *)dSignalTransform));
CUFFT_SAFE_CALL(hipfftExecR2C(fPlan, (hipfftReal *)dKernel, (hipfftComplex *)dKernelTransform));
// Multiply the coefficients together and normalize the result
hipLaunchKernelGGL(( ComplexPointwiseMulAndScale), dim3(32), dim3(256), 0, 0, dSignalTransform, dKernelTransform, n/2 + 1, 1.0f / n); // only n/2+1 coefficients exist after a real-to-complex transform
// Check if kernel execution generated an error
CUT_CHECK_ERROR("Kernel execution failed [ ComplexPointwiseMulAndScale ]");
// Transform signal back
CUFFT_SAFE_CALL(hipfftExecC2R(rPlan, (hipfftComplex *)dSignalTransform, (hipfftReal *)dSignal));
CUDA_SAFE_CALL(hipMemcpy(hSignal, dSignal, memSizeReal,
hipMemcpyDeviceToHost));
//Destroy CUFFT context
CUFFT_SAFE_CALL(hipfftDestroy(fPlan));
CUFFT_SAFE_CALL(hipfftDestroy(rPlan));
// cleanup device memory (hSignal and hKernel are caller-owned stack arrays in main, so they must not be freed here)
CUDA_SAFE_CALL(hipFree(dSignal));
CUDA_SAFE_CALL(hipFree(dKernel));
CUDA_SAFE_CALL(hipFree(dKernelTransform));
CUDA_SAFE_CALL(hipFree(dSignalTransform));
}
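/* The routine relies on the convolution theorem:
filtered[k] = (1/n) * IDFT( DFT(signal) .* DFT(kernel) )[k]
i.e. a circular FIR convolution; the 1/n factor is applied inside
ComplexPointwiseMulAndScale because the forward/inverse transforms are unnormalized. */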
int main(int argc, char* argv[])
{
CUT_DEVICE_INIT(argc,argv);
const int n = 256;
float signal[n];
float kernel[n];
//printf("the signal: \n");
//initialize signal and filter
for (unsigned int i = 0; i<n;i++)
{
if(i%2==0)
signal[i] = 1;
else
signal[i] = -1;
//printf("%1.1f ", signal[i]);
if(i<(1+n/2))
kernel[i]=1;
else
kernel[i]=0;
}
/*
printf("\n \n initialize filteredSignal's memory to constant: \n");
for (unsigned int i = 0; i<n;i++)
printf("%1.1f ", filteredSignal[i]);
printf("\n \n the kernel: \n");
for (unsigned int i = 0; i<n;i++)
printf("%1.1f ", kernel[i]);
printf("\n \n the filtered signal: \n");
*/
//define timing variables
time_t startTime;
time_t endTime;
double runTime;
double timePerCall;
int nCalls = 100000;
time(&startTime);
//call the filtering function
for(unsigned int i = 0; i<nCalls;i++)
fftFilter( &(signal[0]), &(kernel[0]), n);
time(&endTime);
printf("start: %ld ", startTime);
printf("end: %ld ", endTime);
runTime = difftime(endTime,startTime);
timePerCall = runTime*(double)1000/(double)nCalls;
/*
for (unsigned int i = 0; i<n;i++)
printf("%1.1f ", filteredSignal[i]);
*/
printf("\n \n total runtime for %i calls was %f seconds", nCalls, runTime);
printf("\n \n time per call: %f ms" ,timePerCall);
getchar();
return 0;
}
////////////////////////////////////////////////////////////////////////////////
// Complex operations
////////////////////////////////////////////////////////////////////////////////
// Complex addition
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b)
{
Complex c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
// Complex scale
static __device__ __host__ inline Complex ComplexScale(Complex a, float s)
{
Complex c;
c.x = s * a.x;
c.y = s * a.y;
return c;
}
// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
Complex c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
}
// Complex pointwise multiplication
static __global__ void ComplexPointwiseMulAndScale(Complex* a, const Complex* b, int size, float scale)
{
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = threadID; i < size; i += numThreads)
a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale);
} | af888744452ef1764c15bc8d08f029f7fdf70286.cu | //fir filtering via fft with cuda
//includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
// includes, project
#include <cufft.h>
#include <cutil.h>
// Complex data type
typedef float2 Complex; // interleaved re/im, layout-compatible with cufftComplex (a function cannot return an array type)
typedef float Real;
static __device__ __host__ inline Complex ComplexAdd(Complex, Complex);
static __device__ __host__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
static __global__ void ComplexPointwiseMulAndScale(Complex*, const Complex*, int, float);
// Filtering functions
void fftFilter(float* hSignal, float* hKernel, int n)
{// hSignal is filtered in place: on return it holds the circular convolution of the original signal with hKernel; both arrays have n elements.
int memSizeReal = sizeof(Real) * n;
int memSizeComplex = sizeof(Complex) * (n/2+1);
// Allocate device memory for signal
Real* dSignal;
CUDA_SAFE_CALL(cudaMalloc((void**)&dSignal, memSizeReal));
// Copy host memory to device
CUDA_SAFE_CALL(cudaMemcpy(dSignal, hSignal,memSizeReal,
cudaMemcpyHostToDevice));
// Allocate device memory for filter kernel signal, and transforms
Real* dKernel;
CUDA_SAFE_CALL(cudaMalloc((void**)&dKernel, memSizeReal));
// Copy host memory to device
CUDA_SAFE_CALL(cudaMemcpy(dKernel, hKernel, memSizeReal,
cudaMemcpyHostToDevice));
//allocate device memory for transforms
Complex* dKernelTransform;
Complex* dSignalTransform;
CUDA_SAFE_CALL(cudaMalloc((void**)&dKernelTransform, memSizeComplex));
CUDA_SAFE_CALL(cudaMalloc((void**)&dSignalTransform, memSizeComplex));
// CUFFT plan
cufftHandle fPlan;//forward plan
cufftHandle rPlan;//reverse plan
CUFFT_SAFE_CALL(cufftPlan1d(&fPlan, n, CUFFT_R2C, 1));
CUFFT_SAFE_CALL(cufftPlan1d(&rPlan, n, CUFFT_C2R, 1));
// Transform signal and kernel
CUFFT_SAFE_CALL(cufftExecR2C(fPlan, (cufftReal *)dSignal, (cufftComplex *)dSignalTransform));
CUFFT_SAFE_CALL(cufftExecR2C(fPlan, (cufftReal *)dKernel, (cufftComplex *)dKernelTransform));
// Multiply the coefficients together and normalize the result
ComplexPointwiseMulAndScale<<<32, 256>>>(dSignalTransform, dKernelTransform, n/2 + 1, 1.0f / n); // only n/2+1 coefficients exist after a real-to-complex transform
// Check if kernel execution generated an error
CUT_CHECK_ERROR("Kernel execution failed [ ComplexPointwiseMulAndScale ]");
// Transform signal back
CUFFT_SAFE_CALL(cufftExecC2R(rPlan, (cufftComplex *)dSignalTransform, (cufftReal *)dSignal));
CUDA_SAFE_CALL(cudaMemcpy(hSignal, dSignal, memSizeReal,
cudaMemcpyDeviceToHost));
//Destroy CUFFT context
CUFFT_SAFE_CALL(cufftDestroy(fPlan));
CUFFT_SAFE_CALL(cufftDestroy(rPlan));
// cleanup device memory (hSignal and hKernel are caller-owned stack arrays in main, so they must not be freed here)
CUDA_SAFE_CALL(cudaFree(dSignal));
CUDA_SAFE_CALL(cudaFree(dKernel));
CUDA_SAFE_CALL(cudaFree(dKernelTransform));
CUDA_SAFE_CALL(cudaFree(dSignalTransform));
}
int main(int argc, char* argv[])
{
CUT_DEVICE_INIT(argc,argv);
const int n = 256;
float signal[n];
float kernel[n];
//printf("the signal: \n");
//initialize signal and filter
for (unsigned int i = 0; i<n;i++)
{
if(i%2==0)
signal[i] = 1;
else
signal[i] = -1;
//printf("%1.1f ", signal[i]);
if(i<(1+n/2))
kernel[i]=1;
else
kernel[i]=0;
}
/*
printf("\n \n initialize filteredSignal's memory to constant: \n");
for (unsigned int i = 0; i<n;i++)
printf("%1.1f ", filteredSignal[i]);
printf("\n \n the kernel: \n");
for (unsigned int i = 0; i<n;i++)
printf("%1.1f ", kernel[i]);
printf("\n \n the filtered signal: \n");
*/
//define timing variables
time_t startTime;
time_t endTime;
double runTime;
double timePerCall;
int nCalls = 100000;
time(&startTime);
//call the filtering function
for(unsigned int i = 0; i<nCalls;i++)
fftFilter( &(signal[0]), &(kernel[0]), n);
time(&endTime);
printf("start: %ld ", startTime);
printf("end: %ld ", endTime);
runTime = difftime(endTime,startTime);
timePerCall = runTime*(double)1000/(double)nCalls;
/*
for (unsigned int i = 0; i<n;i++)
printf("%1.1f ", filteredSignal[i]);
*/
printf("\n \n total runtime for %i calls was %f seconds", nCalls, runTime);
printf("\n \n time per call: %f ms" ,timePerCall);
getchar();
return 0;
}
////////////////////////////////////////////////////////////////////////////////
// Complex operations
////////////////////////////////////////////////////////////////////////////////
// Complex addition
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b)
{
Complex c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
// Complex scale
static __device__ __host__ inline Complex ComplexScale(Complex a, float s)
{
Complex c;
c.x = s * a.x;
c.y = s * a.y;
return c;
}
// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
Complex c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
}
// Complex pointwise multiplication
static __global__ void ComplexPointwiseMulAndScale(Complex* a, const Complex* b, int size, float scale)
{
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = threadID; i < size; i += numThreads)
a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale);
} |
cb4018a1f7dd0bf83def6525d575df6ff3f7c6dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/distance_op.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/cub_namespace.cuh"
#include <hipcub/hipcub.hpp>
namespace caffe2 {
namespace {
template <typename T>
__global__ void SquaredL2DistanceKernel(
const int N, const int D, const T* X, const T* Y, T* distance) {
typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int i = blockIdx.x; i < N; i += gridDim.x) {
float dist = 0.0;
for (int j = threadIdx.x; j < D; j += blockDim.x) {
T diff = X[i * D + j] - Y[i * D + j];
dist += diff * diff;
}
float total_dist = BlockReduce(temp_storage).Sum(dist);
__syncthreads();
if (threadIdx.x == 0) {
distance[i] = total_dist / 2.0;
}
}
}
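// Per-row result computed above: distance[i] = 0.5 * sum_j (X[i*D + j] - Y[i*D + j])^2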
} // namespace
template <>
bool SquaredL2DistanceOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& Y = Input(1);
CAFFE_ENFORCE_EQ(X.dim(), Y.dim());
for (int i = 0; i < X.dim(); ++i) {
CAFFE_ENFORCE_EQ(
X.dim32(i),
Y.dim32(i),
"Mismatch in dimensions",
X.sizes(),
" / ",
Y.sizes());
}
int N = X.dim() > 0 ? X.dim32(0) : 1;
int D = X.size() / N;
auto* distance = Output(0, vector<int64_t>(size_t(1), N), at::dtype<float>());
hipLaunchKernelGGL(( SquaredL2DistanceKernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
X.data<float>(),
Y.data<float>(),
distance->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
namespace {
template <typename T>
__global__ void
StripedScaleKernel(const int N, const int D, const T* alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
int k = i / D;
y[i] = x[i] * alpha[k];
}
}
}
template <>
bool SquaredL2DistanceGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dDistance = Input(2);
int N = X.dim() > 0 ? X.dim32(0) : 1;
int D = N > 0 ? X.size() / N : 0;
CAFFE_ENFORCE(X.dim() == Y.dim());
for (int i = 0; i < X.dim(); ++i) {
CAFFE_ENFORCE_EQ(
X.dim32(i),
Y.dim32(i),
"Mismatch on dimensions: ",
X.sizes(),
" / ",
Y.sizes());
}
CAFFE_ENFORCE_EQ(dDistance.dim(), 1);
CAFFE_ENFORCE_EQ(dDistance.dim32(0), N);
auto* dX = Output(0, X.sizes(), at::dtype<float>());
auto* dY = Output(1, Y.sizes(), at::dtype<float>());
math::Sub<float, CUDAContext>(
X.size(),
X.data<float>(),
Y.data<float>(),
dX->template mutable_data<float>(),
&context_);
hipLaunchKernelGGL(( StripedScaleKernel<float>)
, dim3(CAFFE_GET_BLOCKS(N * D)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
dDistance.data<float>(),
dX->data<float>(),
dX->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
// The gradient of the other side is basically the negative.
math::Scale<float, float, CUDAContext>(
X.size(),
-1,
dX->data<float>(),
dY->template mutable_data<float>(),
&context_);
return true;
}
namespace {
template <typename T>
__global__ void L1DistanceKernel(
const int N,
const int D,
const T* X,
const T* Y,
T* distance) {
typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int i = blockIdx.x; i < N; i += gridDim.x) {
float sum = 0.0f;
for (int j = threadIdx.x; j < D; j += blockDim.x) {
sum += fabsf(
convert::To<T, float>(X[i * D + j]) -
convert::To<T, float>(Y[i * D + j]));
}
float aggregate = BlockReduce(temp_storage).Sum(sum);
__syncthreads();
if (threadIdx.x == 0) {
distance[i] = aggregate;
}
}
}
} // namespace
template <>
bool L1DistanceOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& Y = Input(1);
CAFFE_ENFORCE_EQ(X.dim(), Y.dim());
for (int i = 0; i < X.dim(); ++i) {
CAFFE_ENFORCE_EQ(X.dim32(i), Y.dim32(i));
}
const int N = X.dim() > 0 ? X.dim32(0) : 1;
const int D = N > 0 ? X.size() / N : 0;
auto* distance = Output(0, vector<int64_t>(size_t(1), N), at::dtype<float>());
hipLaunchKernelGGL(( L1DistanceKernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
X.data<float>(),
Y.data<float>(),
distance->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
namespace {
template <typename T>
__global__ void L1DistanceGradientKernel(
const int N,
const int D,
const T* X,
const T* Y,
const T* dDistance,
T* dX,
T* dY) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
constexpr float kEps = 1e-12;
int k = i / D;
if (X[i] - Y[i] < -kEps) {
dX[i] = -dDistance[k];
dY[i] = dDistance[k];
} else if (X[i] - Y[i] > kEps) {
dX[i] = dDistance[k];
dY[i] = -dDistance[k];
} else {
dX[i] = 0;
dY[i] = 0;
}
}
}
} // namespace
template <>
bool L1DistanceGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dDistance = Input(2);
int N = X.dim() > 0 ? X.dim32(0) : 1;
int D = N > 0 ? X.size() / N : 0;
CAFFE_ENFORCE(X.dim() == Y.dim());
for (int i = 0; i < X.dim(); ++i) {
CAFFE_ENFORCE_EQ(
X.dim32(i),
Y.dim32(i),
"Mismatch on dimensions: ",
X.sizes(),
" / ",
Y.sizes());
}
CAFFE_ENFORCE_EQ(dDistance.dim(), 1);
CAFFE_ENFORCE_EQ(dDistance.dim32(0), N);
auto* dX = Output(0, X.sizes(), at::dtype<float>());
auto* dY = Output(1, Y.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( L1DistanceGradientKernel),
dim3(CAFFE_GET_BLOCKS(N * D)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
X.data<float>(),
Y.data<float>(),
dDistance.data<float>(),
dX->template mutable_data<float>(),
dY->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
namespace {
template <typename T>
__global__ void
DotProductKernel(const int N, const int D, const T* X, const T* Y, T* result) {
for (int i = blockIdx.x; i < N; i += gridDim.x) {
T partialSum = 0;
int offset = i * D;
for (int j = threadIdx.x; j < D; j += blockDim.x) {
partialSum += X[offset + j] * Y[offset + j];
}
typedef hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
T sum = BlockReduce(temp_storage).Sum(partialSum);
__syncthreads();
if (threadIdx.x == 0) {
result[i] = sum;
}
}
}
// X.size() = N*D, Y.size() = N
template <typename T>
__global__ void
BatchedMul(const int N, const int D, const T* X, const T* Y, T* result) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
result[i] = X[i] * Y[i / D];
}
}
// X.size() = N*D, Y.size() = N
template <typename T>
__global__ void Scale2AxpyScale(
const int N,
const T* scale,
const T* XY,
const T* XN,
T* result) {
CUDA_1D_KERNEL_LOOP(i, N) {
result[i] = -scale[i] * XY[i] / (XN[i] * XN[i]);
}
}
// X.size() = X*N, alpha.size() = N, Y.size() = X*N
template <typename T>
__global__ void
BatchedAxpy(const int N, const int D, const T* alpha, const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
Y[i] += X[i] * alpha[i / D];
}
}
} // namespace
template <>
bool CosineSimilarityOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(X_IN);
auto& Y = Input(Y_IN);
CAFFE_ENFORCE_EQ(X.dim(), Y.dim());
for (int i = 0; i < X.dim(); ++i) {
CAFFE_ENFORCE_EQ(X.dim32(i), Y.dim32(i));
}
const int N = X.dim() > 0 ? X.dim32(0) : 1;
const int D = X.size_from_dim(1);
auto* result = Output(COS_OUT, {N}, at::dtype<float>());
float* result_data = result->template mutable_data<float>();
const float* X_data = X.data<float>();
const float* Y_data = Y.data<float>();
// Auxiliary arrays, one allocation of memory
ReinitializeTensor(&aux_, {2 * N}, at::dtype<float>().device(CUDA));
float* aux_data = aux_.mutable_data<float>();
float* x2 = aux_data;
float* y2 = aux_data + N;
float* scale = x2;
const float kEps = 1e-12f;
hipLaunchKernelGGL(( DotProductKernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, D, X_data, X_data, x2);
C10_HIP_KERNEL_LAUNCH_CHECK();
hipLaunchKernelGGL(( DotProductKernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, D, Y_data, Y_data, y2);
C10_HIP_KERNEL_LAUNCH_CHECK();
hipLaunchKernelGGL(( DotProductKernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, D, X_data, Y_data, result_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
math::Maximum<float, CUDAContext>(N, kEps, x2, x2, &context_);
math::Maximum<float, CUDAContext>(N, kEps, y2, y2, &context_);
math::Mul(N, x2, y2, scale, &context_);
math::Rsqrt(N, scale, scale, &context_);
math::Mul(N, result_data, scale, result_data, &context_);
return true;
}
template <>
bool CosineSimilarityGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(X_IN);
auto& Y = Input(Y_IN);
auto& dCos = Input(DER_COS_IN);
const int N = X.dim() > 0 ? X.dim32(0) : 1;
const int D = X.size_from_dim(1);
CAFFE_ENFORCE(X.dim() == Y.dim());
for (int i = 0; i < X.dim(); ++i) {
CAFFE_ENFORCE(X.dim32(i) == Y.dim32(i));
}
CAFFE_ENFORCE(dCos.dim() == 1);
CAFFE_ENFORCE(dCos.dim32(0) == N);
auto* dX = Output(DER_X_OUT, X.sizes(), at::dtype<float>());
auto* dY = Output(DER_Y_OUT, Y.sizes(), at::dtype<float>());
const auto* X_data = X.data<float>();
const auto* Y_data = Y.data<float>();
const auto* dCos_data = dCos.data<float>();
auto* dX_data = dX->template mutable_data<float>();
auto* dY_data = dY->template mutable_data<float>();
// one memory allocation, a few arrays
ReinitializeTensor(&aux_, {6 * N}, at::dtype<float>().device(CUDA));
float* aux_data = aux_.mutable_data<float>();
float* xn = aux_data;
float* yn = aux_data + N;
float* xy = aux_data + 2 * N;
float* xyn = aux_data + 3 * N;
float* scale = aux_data + 4 * N;
float* axpy_scale = aux_data + 5 * N;
float kEps = 1e-12f;
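// Gradient assembled below (restating the math, with xn = ||X_i||, yn = ||Y_i||, xy = X_i . Y_i):
// dX_i = dCos_i * ( Y_i / (xn * yn) - xy * X_i / (xn^3 * yn) ), and symmetrically for dY_i.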
// ||x||
hipLaunchKernelGGL(( DotProductKernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, D, X_data, X_data, xn);
C10_HIP_KERNEL_LAUNCH_CHECK();
math::Maximum<float, CUDAContext>(N, kEps, xn, xn, &context_);
math::Sqrt<float, CUDAContext>(N, xn, xn, &context_);
// ||y||
hipLaunchKernelGGL(( DotProductKernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, D, Y_data, Y_data, yn);
C10_HIP_KERNEL_LAUNCH_CHECK();
math::Maximum<float, CUDAContext>(N, kEps, yn, yn, &context_);
math::Sqrt<float, CUDAContext>(N, yn, yn, &context_);
// ||x|| * || y ||
math::Mul<float, CUDAContext>(N, xn, yn, xyn, &context_);
hipLaunchKernelGGL(( DotProductKernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, D, X_data, Y_data, xy);
C10_HIP_KERNEL_LAUNCH_CHECK();
math::Div<float, CUDAContext>(N, dCos_data, xyn, scale, &context_);
// dX
hipLaunchKernelGGL(( BatchedMul<float>),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, D, Y_data, scale, dX_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
hipLaunchKernelGGL(( Scale2AxpyScale<float>),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, scale, xy, xn, axpy_scale);
C10_HIP_KERNEL_LAUNCH_CHECK();
hipLaunchKernelGGL(( BatchedAxpy<float>),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, D, axpy_scale, X_data, dX_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
// dY
hipLaunchKernelGGL(( BatchedMul<float>),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, D, X_data, scale, dY_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
hipLaunchKernelGGL(( Scale2AxpyScale<float>),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, scale, xy, yn, axpy_scale);
C10_HIP_KERNEL_LAUNCH_CHECK();
hipLaunchKernelGGL(( BatchedAxpy<float>),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, D, axpy_scale, Y_data, dY_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool DotProductOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(X_IN);
auto& Y = Input(Y_IN);
CAFFE_ENFORCE_EQ(X.dim(), Y.dim());
for (int i = 0; i < X.dim(); ++i) {
CAFFE_ENFORCE_EQ(X.dim32(i), Y.dim32(i));
}
int N, D;
if (X.size() > 0) {
N = X.dim() > 0 ? X.dim32(0) : 1;
D = X.size() / N;
} else {
N = 0;
D = 0;
}
auto* result = Output(DOT_OUT, {N}, at::dtype<float>());
hipLaunchKernelGGL(( DotProductKernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
X.data<float>(),
Y.data<float>(),
result->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
namespace {
template <typename T>
__global__ void DotProductGradientKernel(
const int N,
const int D,
const T* X,
const T* Y,
const T* dDot,
T* dX,
T* dY) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
T scale = dDot[i / D];
dX[i] = Y[i] * scale;
dY[i] = X[i] * scale;
}
}
} // namespace
template <>
bool DotProductGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(X_IN);
auto& Y = Input(Y_IN);
auto& dDot = Input(DER_DOT_IN);
int N, D;
if (X.size() > 0) {
N = X.dim() > 0 ? X.dim32(0) : 1;
D = X.size() / N;
} else {
N = 0;
D = 0;
}
CAFFE_ENFORCE(X.dim() == Y.dim());
for (int i = 0; i < X.dim(); ++i) {
CAFFE_ENFORCE(X.dim32(i) == Y.dim32(i));
}
CAFFE_ENFORCE(dDot.dim() == 1);
CAFFE_ENFORCE(dDot.dim32(0) == N);
auto* dX = Output(DER_X_OUT, X.sizes(), at::dtype<float>());
auto* dY = Output(DER_Y_OUT, Y.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( DotProductGradientKernel),
dim3(CAFFE_GET_BLOCKS(N * D)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
X.data<float>(),
Y.data<float>(),
dDot.data<float>(),
dX->template mutable_data<float>(),
dY->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(SquaredL2Distance,
SquaredL2DistanceOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SquaredL2DistanceGradient,
SquaredL2DistanceGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(L1Distance, L1DistanceOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
L1DistanceGradient,
L1DistanceGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(DotProduct, DotProductOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
DotProductGradient,
DotProductGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
CosineSimilarity,
CosineSimilarityOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
CosineSimilarityGradient,
CosineSimilarityGradientOp<float, CUDAContext>);
} // namespace caffe2
| cb4018a1f7dd0bf83def6525d575df6ff3f7c6dd.cu | #include <cub/block/block_reduce.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/distance_op.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/cub_namespace.cuh"
#include <cub/block/block_reduce.cuh>
namespace caffe2 {
namespace {
template <typename T>
__global__ void SquaredL2DistanceKernel(
const int N, const int D, const T* X, const T* Y, T* distance) {
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int i = blockIdx.x; i < N; i += gridDim.x) {
float dist = 0.0;
for (int j = threadIdx.x; j < D; j += blockDim.x) {
T diff = X[i * D + j] - Y[i * D + j];
dist += diff * diff;
}
float total_dist = BlockReduce(temp_storage).Sum(dist);
__syncthreads();
if (threadIdx.x == 0) {
distance[i] = total_dist / 2.0;
}
}
}
} // namespace
template <>
bool SquaredL2DistanceOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& Y = Input(1);
CAFFE_ENFORCE_EQ(X.dim(), Y.dim());
for (int i = 0; i < X.dim(); ++i) {
CAFFE_ENFORCE_EQ(
X.dim32(i),
Y.dim32(i),
"Mismatch in dimensions",
X.sizes(),
" / ",
Y.sizes());
}
int N = X.dim() > 0 ? X.dim32(0) : 1;
int D = X.size() / N;
auto* distance = Output(0, vector<int64_t>(size_t(1), N), at::dtype<float>());
SquaredL2DistanceKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
X.data<float>(),
Y.data<float>(),
distance->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
namespace {
template <typename T>
__global__ void
StripedScaleKernel(const int N, const int D, const T* alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
int k = i / D;
y[i] = x[i] * alpha[k];
}
}
}
template <>
bool SquaredL2DistanceGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dDistance = Input(2);
int N = X.dim() > 0 ? X.dim32(0) : 1;
int D = N > 0 ? X.size() / N : 0;
CAFFE_ENFORCE(X.dim() == Y.dim());
for (int i = 0; i < X.dim(); ++i) {
CAFFE_ENFORCE_EQ(
X.dim32(i),
Y.dim32(i),
"Mismatch on dimensions: ",
X.sizes(),
" / ",
Y.sizes());
}
CAFFE_ENFORCE_EQ(dDistance.dim(), 1);
CAFFE_ENFORCE_EQ(dDistance.dim32(0), N);
auto* dX = Output(0, X.sizes(), at::dtype<float>());
auto* dY = Output(1, Y.sizes(), at::dtype<float>());
math::Sub<float, CUDAContext>(
X.size(),
X.data<float>(),
Y.data<float>(),
dX->template mutable_data<float>(),
&context_);
StripedScaleKernel<float>
<<<CAFFE_GET_BLOCKS(N * D),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
dDistance.data<float>(),
dX->data<float>(),
dX->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
// The gradient of the other side is basically the negative.
math::Scale<float, float, CUDAContext>(
X.size(),
-1,
dX->data<float>(),
dY->template mutable_data<float>(),
&context_);
return true;
}
namespace {
template <typename T>
__global__ void L1DistanceKernel(
const int N,
const int D,
const T* X,
const T* Y,
T* distance) {
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int i = blockIdx.x; i < N; i += gridDim.x) {
float sum = 0.0f;
for (int j = threadIdx.x; j < D; j += blockDim.x) {
sum += fabsf(
convert::To<T, float>(X[i * D + j]) -
convert::To<T, float>(Y[i * D + j]));
}
float aggregate = BlockReduce(temp_storage).Sum(sum);
__syncthreads();
if (threadIdx.x == 0) {
distance[i] = aggregate;
}
}
}
} // namespace
template <>
bool L1DistanceOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& Y = Input(1);
CAFFE_ENFORCE_EQ(X.dim(), Y.dim());
for (int i = 0; i < X.dim(); ++i) {
CAFFE_ENFORCE_EQ(X.dim32(i), Y.dim32(i));
}
const int N = X.dim() > 0 ? X.dim32(0) : 1;
const int D = N > 0 ? X.size() / N : 0;
auto* distance = Output(0, vector<int64_t>(size_t(1), N), at::dtype<float>());
L1DistanceKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
X.data<float>(),
Y.data<float>(),
distance->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
namespace {
template <typename T>
__global__ void L1DistanceGradientKernel(
const int N,
const int D,
const T* X,
const T* Y,
const T* dDistance,
T* dX,
T* dY) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
constexpr float kEps = 1e-12;
int k = i / D;
if (X[i] - Y[i] < -kEps) {
dX[i] = -dDistance[k];
dY[i] = dDistance[k];
} else if (X[i] - Y[i] > kEps) {
dX[i] = dDistance[k];
dY[i] = -dDistance[k];
} else {
dX[i] = 0;
dY[i] = 0;
}
}
}
} // namespace
template <>
bool L1DistanceGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dDistance = Input(2);
int N = X.dim() > 0 ? X.dim32(0) : 1;
int D = N > 0 ? X.size() / N : 0;
CAFFE_ENFORCE(X.dim() == Y.dim());
for (int i = 0; i < X.dim(); ++i) {
CAFFE_ENFORCE_EQ(
X.dim32(i),
Y.dim32(i),
"Mismatch on dimensions: ",
X.sizes(),
" / ",
Y.sizes());
}
CAFFE_ENFORCE_EQ(dDistance.dim(), 1);
CAFFE_ENFORCE_EQ(dDistance.dim32(0), N);
auto* dX = Output(0, X.sizes(), at::dtype<float>());
auto* dY = Output(1, Y.sizes(), at::dtype<float>());
L1DistanceGradientKernel<<<
CAFFE_GET_BLOCKS(N * D),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
X.data<float>(),
Y.data<float>(),
dDistance.data<float>(),
dX->template mutable_data<float>(),
dY->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
namespace {
template <typename T>
__global__ void
DotProductKernel(const int N, const int D, const T* X, const T* Y, T* result) {
for (int i = blockIdx.x; i < N; i += gridDim.x) {
T partialSum = 0;
int offset = i * D;
for (int j = threadIdx.x; j < D; j += blockDim.x) {
partialSum += X[offset + j] * Y[offset + j];
}
typedef cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
T sum = BlockReduce(temp_storage).Sum(partialSum);
__syncthreads();
if (threadIdx.x == 0) {
result[i] = sum;
}
}
}
// X.size() = N*D, Y.size() = N
template <typename T>
__global__ void
BatchedMul(const int N, const int D, const T* X, const T* Y, T* result) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
result[i] = X[i] * Y[i / D];
}
}
// X.size() = N*D, Y.size() = N
template <typename T>
__global__ void Scale2AxpyScale(
const int N,
const T* scale,
const T* XY,
const T* XN,
T* result) {
CUDA_1D_KERNEL_LOOP(i, N) {
result[i] = -scale[i] * XY[i] / (XN[i] * XN[i]);
}
}
// X.size() = X*N, alpha.size() = N, Y.size() = X*N
template <typename T>
__global__ void
BatchedAxpy(const int N, const int D, const T* alpha, const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
Y[i] += X[i] * alpha[i / D];
}
}
} // namespace
template <>
bool CosineSimilarityOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(X_IN);
auto& Y = Input(Y_IN);
CAFFE_ENFORCE_EQ(X.dim(), Y.dim());
for (int i = 0; i < X.dim(); ++i) {
CAFFE_ENFORCE_EQ(X.dim32(i), Y.dim32(i));
}
const int N = X.dim() > 0 ? X.dim32(0) : 1;
const int D = X.size_from_dim(1);
auto* result = Output(COS_OUT, {N}, at::dtype<float>());
float* result_data = result->template mutable_data<float>();
const float* X_data = X.data<float>();
const float* Y_data = Y.data<float>();
// Auxiliary arrays, one allocation of memory
ReinitializeTensor(&aux_, {2 * N}, at::dtype<float>().device(CUDA));
float* aux_data = aux_.mutable_data<float>();
float* x2 = aux_data;
float* y2 = aux_data + N;
float* scale = x2;
const float kEps = 1e-12f;
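 // cos_i = dot(X_i, Y_i) / sqrt(max(dot(X_i, X_i), eps) * max(dot(Y_i, Y_i), eps)):
 // the three DotProductKernel launches below compute the per-row dot products,
 // then the result is rescaled by rsqrt(x2 * y2).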
DotProductKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, X_data, X_data, x2);
C10_CUDA_KERNEL_LAUNCH_CHECK();
DotProductKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, Y_data, Y_data, y2);
C10_CUDA_KERNEL_LAUNCH_CHECK();
DotProductKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, X_data, Y_data, result_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
math::Maximum<float, CUDAContext>(N, kEps, x2, x2, &context_);
math::Maximum<float, CUDAContext>(N, kEps, y2, y2, &context_);
math::Mul(N, x2, y2, scale, &context_);
math::Rsqrt(N, scale, scale, &context_);
math::Mul(N, result_data, scale, result_data, &context_);
return true;
}
template <>
bool CosineSimilarityGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(X_IN);
auto& Y = Input(Y_IN);
auto& dCos = Input(DER_COS_IN);
const int N = X.dim() > 0 ? X.dim32(0) : 1;
const int D = X.size_from_dim(1);
CAFFE_ENFORCE(X.dim() == Y.dim());
for (int i = 0; i < X.dim(); ++i) {
CAFFE_ENFORCE(X.dim32(i) == Y.dim32(i));
}
CAFFE_ENFORCE(dCos.dim() == 1);
CAFFE_ENFORCE(dCos.dim32(0) == N);
auto* dX = Output(DER_X_OUT, X.sizes(), at::dtype<float>());
auto* dY = Output(DER_Y_OUT, Y.sizes(), at::dtype<float>());
const auto* X_data = X.data<float>();
const auto* Y_data = Y.data<float>();
const auto* dCos_data = dCos.data<float>();
auto* dX_data = dX->template mutable_data<float>();
auto* dY_data = dY->template mutable_data<float>();
// one memory allocation, a few arrays
ReinitializeTensor(&aux_, {6 * N}, at::dtype<float>().device(CUDA));
float* aux_data = aux_.mutable_data<float>();
float* xn = aux_data;
float* yn = aux_data + N;
float* xy = aux_data + 2 * N;
float* xyn = aux_data + 3 * N;
float* scale = aux_data + 4 * N;
float* axpy_scale = aux_data + 5 * N;
float kEps = 1e-12f;
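 // Per-row gradient of cos(x, y) = dot(x, y) / (||x|| * ||y||):
 //   dX = dCos * ( Y / (||x|| * ||y||) - dot(x, y) * X / (||x||^2 * ||x|| * ||y||) )
 // Below, scale = dCos / (||x|| * ||y||); BatchedMul writes scale * Y into dX and
 // BatchedAxpy adds axpy_scale * X, where axpy_scale = -scale * dot(x, y) / ||x||^2.
 // dY is obtained the same way with the roles of X and Y swapped.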
// ||x||
DotProductKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, X_data, X_data, xn);
C10_CUDA_KERNEL_LAUNCH_CHECK();
math::Maximum<float, CUDAContext>(N, kEps, xn, xn, &context_);
math::Sqrt<float, CUDAContext>(N, xn, xn, &context_);
// ||y||
DotProductKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, Y_data, Y_data, yn);
C10_CUDA_KERNEL_LAUNCH_CHECK();
math::Maximum<float, CUDAContext>(N, kEps, yn, yn, &context_);
math::Sqrt<float, CUDAContext>(N, yn, yn, &context_);
// ||x|| * || y ||
math::Mul<float, CUDAContext>(N, xn, yn, xyn, &context_);
DotProductKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, X_data, Y_data, xy);
C10_CUDA_KERNEL_LAUNCH_CHECK();
math::Div<float, CUDAContext>(N, dCos_data, xyn, scale, &context_);
// dX
BatchedMul<float><<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, Y_data, scale, dX_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
Scale2AxpyScale<float><<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, scale, xy, xn, axpy_scale);
C10_CUDA_KERNEL_LAUNCH_CHECK();
BatchedAxpy<float><<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, axpy_scale, X_data, dX_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
// dY
BatchedMul<float><<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, X_data, scale, dY_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
Scale2AxpyScale<float><<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, scale, xy, yn, axpy_scale);
C10_CUDA_KERNEL_LAUNCH_CHECK();
BatchedAxpy<float><<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, axpy_scale, Y_data, dY_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool DotProductOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(X_IN);
auto& Y = Input(Y_IN);
CAFFE_ENFORCE_EQ(X.dim(), Y.dim());
for (int i = 0; i < X.dim(); ++i) {
CAFFE_ENFORCE_EQ(X.dim32(i), Y.dim32(i));
}
int N, D;
if (X.size() > 0) {
N = X.dim() > 0 ? X.dim32(0) : 1;
D = X.size() / N;
} else {
N = 0;
D = 0;
}
auto* result = Output(DOT_OUT, {N}, at::dtype<float>());
DotProductKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
X.data<float>(),
Y.data<float>(),
result->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
namespace {
template <typename T>
__global__ void DotProductGradientKernel(
const int N,
const int D,
const T* X,
const T* Y,
const T* dDot,
T* dX,
T* dY) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
T scale = dDot[i / D];
dX[i] = Y[i] * scale;
dY[i] = X[i] * scale;
}
}
} // namespace
template <>
bool DotProductGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(X_IN);
auto& Y = Input(Y_IN);
auto& dDot = Input(DER_DOT_IN);
int N, D;
if (X.size() > 0) {
N = X.dim() > 0 ? X.dim32(0) : 1;
D = X.size() / N;
} else {
N = 0;
D = 0;
}
CAFFE_ENFORCE(X.dim() == Y.dim());
for (int i = 0; i < X.dim(); ++i) {
CAFFE_ENFORCE(X.dim32(i) == Y.dim32(i));
}
CAFFE_ENFORCE(dDot.dim() == 1);
CAFFE_ENFORCE(dDot.dim32(0) == N);
auto* dX = Output(DER_X_OUT, X.sizes(), at::dtype<float>());
auto* dY = Output(DER_Y_OUT, Y.sizes(), at::dtype<float>());
DotProductGradientKernel<<<
CAFFE_GET_BLOCKS(N * D),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
X.data<float>(),
Y.data<float>(),
dDot.data<float>(),
dX->template mutable_data<float>(),
dY->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(SquaredL2Distance,
SquaredL2DistanceOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SquaredL2DistanceGradient,
SquaredL2DistanceGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(L1Distance, L1DistanceOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
L1DistanceGradient,
L1DistanceGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(DotProduct, DotProductOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
DotProductGradient,
DotProductGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
CosineSimilarity,
CosineSimilarityOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
CosineSimilarityGradient,
CosineSimilarityGradientOp<float, CUDAContext>);
} // namespace caffe2
|
c46a353bc364779675c4e333dcde1161a45c850d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void rgbToGreyKernel(int height,int width ,unsigned char *input_img, unsigned char *output_img)
{
int col = blockIdx.x*blockDim.x + threadIdx.x;
int row = blockIdx.y*blockDim.y + threadIdx.y;
if(row<height && col<width)
{
int idx = row*width + col;
float red = (float)input_img[3*idx];
float green = (float)input_img[3*idx+1];
float blue = (float)input_img[3*idx+2];
output_img[idx] = 0.21*red + 0.71*green + 0.07*blue;
}
} | c46a353bc364779675c4e333dcde1161a45c850d.cu | #include "includes.h"
__global__ void rgbToGreyKernel(int height,int width ,unsigned char *input_img, unsigned char *output_img)
{
int col = blockIdx.x*blockDim.x + threadIdx.x;
int row = blockIdx.y*blockDim.y + threadIdx.y;
if(row<height && col<width)
{
int idx = row*width + col;
float red = (float)input_img[3*idx];
float green = (float)input_img[3*idx+1];
float blue = (float)input_img[3*idx+2];
output_img[idx] = 0.21*red + 0.71*green + 0.07*blue;
}
} |
519f7fd545fa4ca15627a5bbeb2c0af2cc8b00bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "parquet_gpu.h"
#if (__CUDACC_VER_MAJOR__ >= 9)
#define SHFL0(v) __shfl_sync(~0, v, 0)
#define SHFL(v, t) __shfl_sync(~0, v, t)
#define SYNCWARP() __syncwarp()
#define BALLOT(v) __ballot_sync(~0, v)
#else
#define SHFL0(v) __shfl(v, 0)
#define SHFL(v, t) __shfl(v, t)
#define SYNCWARP()
#define BALLOT(v) __ballot(v)
#endif
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
// Minimal thrift implementation for parsing page headers
enum {
ST_FLD_TRUE = 1,
ST_FLD_FALSE = 2,
ST_FLD_BYTE = 3,
ST_FLD_I16 = 4,
ST_FLD_I32 = 5,
ST_FLD_I64 = 6,
ST_FLD_DOUBLE = 7,
ST_FLD_BINARY = 8,
ST_FLD_LIST = 9,
ST_FLD_SET = 10,
ST_FLD_MAP = 11,
ST_FLD_STRUCT = 12,
};
static const __device__ __constant__ uint8_t g_list2struct[16] =
{
0, 1, 2, ST_FLD_BYTE,
ST_FLD_DOUBLE, 5, ST_FLD_I16, 7,
ST_FLD_I32, 9, ST_FLD_I64, ST_FLD_BINARY,
ST_FLD_STRUCT, ST_FLD_MAP, ST_FLD_SET, ST_FLD_LIST
};
struct byte_stream_s
{
const uint8_t *cur;
const uint8_t *end;
const uint8_t *base;
// Parsed symbols
PageType page_type;
PageInfo page;
ColumnChunkDesc ck;
};
inline __device__ unsigned int getb(byte_stream_s *bs)
{
return (bs->cur < bs->end) ? *bs->cur++ : 0;
}
inline __device__ void skip_bytes(byte_stream_s *bs, size_t bytecnt)
{
bytecnt = min(bytecnt, (size_t)(bs->end - bs->cur));
bs->cur += bytecnt;
}
__device__ uint32_t get_u32(byte_stream_s *bs)
{
uint32_t v = 0, l = 0, c;
do
{
c = getb(bs);
v |= (c & 0x7f) << l;
l += 7;
} while (c & 0x80);
return v;
}
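// get_u32 above decodes a Thrift compact-protocol varint (7 data bits per byte,
// least-significant group first); get_i32 below additionally undoes the zigzag
// mapping, so the unsigned values 0, 1, 2, 3, ... decode to 0, -1, 1, -2, ...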
inline __device__ int32_t get_i32(byte_stream_s *bs)
{
uint32_t u = get_u32(bs);
return (int32_t)((u >> 1u) ^ -(int32_t)(u & 1));
}
__device__ void skip_struct_field(byte_stream_s *bs, int t)
{
int struct_depth = 0;
int rep_cnt = 0;
do {
if (rep_cnt != 0)
{
rep_cnt--;
}
else if (struct_depth != 0)
{
int c;
do {
c = getb(bs);
if (!c)
--struct_depth;
} while (!c && struct_depth);
if (!struct_depth)
break;
t = c & 0xf;
if (!(c & 0xf0))
get_i32(bs);
}
switch (t)
{
case ST_FLD_TRUE:
case ST_FLD_FALSE:
break;
case ST_FLD_I16:
case ST_FLD_I32:
case ST_FLD_I64:
get_u32(bs);
break;
case ST_FLD_DOUBLE:
skip_bytes(bs, 8);
break;
case ST_FLD_BINARY:
skip_bytes(bs, get_u32(bs));
break;
case ST_FLD_LIST:
case ST_FLD_SET:
{ // NOTE: skipping a list of lists is not handled
int c = getb(bs);
int n = c >> 4;
if (n == 0xf)
n = get_u32(bs);
t = g_list2struct[c & 0xf];
if (t == ST_FLD_STRUCT)
struct_depth += n;
else
rep_cnt = n;
}
break;
case ST_FLD_STRUCT:
struct_depth++;
break;
}
} while (rep_cnt || struct_depth);
}
#define PARQUET_BEGIN_STRUCT(fn) \
 __device__ bool fn(byte_stream_s *bs) \
 { \
 int fld = 0; \
 for (;;) \
 { \
 int c, t, f; \
 c = getb(bs); \
 if (!c) \
 break; \
 f = c >> 4; \
 t = c & 0xf; \
 fld = (f) ? fld+f : get_i32(bs); \
 switch(fld) {
#define PARQUET_FLD_ENUM(id, m, mt) \
 case id: bs->m = (mt)get_i32(bs); if (t != ST_FLD_I32) return false; break;
#define PARQUET_FLD_INT32(id, m) \
 case id: bs->m = get_i32(bs); if (t != ST_FLD_I32) return false; break;
#define PARQUET_FLD_STRUCT(id, m) \
 case id: if (t != ST_FLD_STRUCT || !m(bs)) return false; break;
#define PARQUET_END_STRUCT() \
 default: \
 skip_struct_field(bs, t); \
 break; \
 } \
 } \
 return true; \
 }
PARQUET_BEGIN_STRUCT(gpuParseDataPageHeader)
PARQUET_FLD_INT32(1, page.num_values)
PARQUET_FLD_ENUM(2, page.encoding, Encoding);
PARQUET_FLD_ENUM(3, page.definition_level_encoding, Encoding);
PARQUET_FLD_ENUM(4, page.repetition_level_encoding, Encoding);
PARQUET_END_STRUCT()
PARQUET_BEGIN_STRUCT(gpuParseDictionaryPageHeader)
PARQUET_FLD_INT32(1, page.num_values)
PARQUET_FLD_ENUM(2, page.encoding, Encoding);
PARQUET_END_STRUCT()
PARQUET_BEGIN_STRUCT(gpuParseDataPageHeaderV2)
PARQUET_FLD_INT32(1, page.num_values)
PARQUET_FLD_INT32(3, page.num_rows)
PARQUET_FLD_ENUM(4, page.encoding, Encoding);
PARQUET_FLD_ENUM(5, page.definition_level_encoding, Encoding);
PARQUET_FLD_ENUM(6, page.repetition_level_encoding, Encoding);
PARQUET_END_STRUCT()
PARQUET_BEGIN_STRUCT(gpuParsePageHeader)
PARQUET_FLD_ENUM(1, page_type, PageType)
PARQUET_FLD_INT32(2, page.uncompressed_page_size)
PARQUET_FLD_INT32(3, page.compressed_page_size)
PARQUET_FLD_STRUCT(5, gpuParseDataPageHeader)
PARQUET_FLD_STRUCT(7, gpuParseDictionaryPageHeader)
PARQUET_FLD_STRUCT(8, gpuParseDataPageHeaderV2)
PARQUET_END_STRUCT()
/**
* @brief Kernel for outputting page headers from the specified column chunks
*
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
**/
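// Each 128-thread block processes up to 4 column chunks, one 32-thread warp per
// chunk: lane 0 walks the compressed byte stream and parses the page headers,
// while the whole warp cooperates on copying the ColumnChunkDesc / PageInfo structs.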
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128)
gpuDecodePageHeaders(ColumnChunkDesc *chunks, int32_t num_chunks)
{
__shared__ byte_stream_s bs_g[4];
int t = threadIdx.x & 0x1f;
int chunk = (blockIdx.x << 2) + (threadIdx.x >> 5);
byte_stream_s * const bs = &bs_g[threadIdx.x >> 5];
if (chunk < num_chunks)
{
// NOTE: Assumes that sizeof(ColumnChunkDesc) <= 128
if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t))
{
((uint32_t *)&bs->ck)[t] = ((const uint32_t *)&chunks[chunk])[t];
}
}
__syncthreads();
if (chunk < num_chunks)
{
size_t num_values, values_found;
uint32_t data_page_count = 0;
uint32_t dictionary_page_count = 0;
int32_t max_num_pages;
int32_t num_dict_pages = bs->ck.num_dict_pages;
PageInfo *page_info;
if (!t)
{
bs->base = bs->cur = bs->ck.compressed_data;
bs->end = bs->base + bs->ck.compressed_size;
bs->page.chunk_idx = chunk;
bs->page.chunk_row = 0;
bs->page.num_rows = 0;
}
num_values = bs->ck.num_values;
page_info = bs->ck.page_info;
num_dict_pages = bs->ck.num_dict_pages;
max_num_pages = (page_info) ? bs->ck.max_num_pages : 0;
values_found = 0;
SYNCWARP();
while (values_found < num_values && bs->cur < bs->end)
{
int index_out = -1;
if (t == 0)
{
bs->page.chunk_row += bs->page.num_rows;
bs->page.num_rows = 0;
if (gpuParsePageHeader(bs) && bs->page.compressed_page_size >= 0)
{
switch (bs->page_type)
{
case DATA_PAGE:
// TODO: Unless the file only uses V2 page headers or has no complex nesting (num_rows == num_values), we can't infer num_rows at this time
// -> we'll need another pass after decompression to parse the definition and repetition levels to infer the correct value of num_rows
bs->page.num_rows = bs->page.num_values; // Assumes num_rows == num_values
// Fall-through to V2
case DATA_PAGE_V2:
index_out = num_dict_pages + data_page_count;
data_page_count++;
bs->page.flags = 0;
values_found += bs->page.num_values;
break;
case DICTIONARY_PAGE:
index_out = dictionary_page_count;
dictionary_page_count++;
bs->page.flags = PAGEINFO_FLAGS_DICTIONARY;
break;
default:
index_out = -1;
break;
}
bs->page.page_data = const_cast<uint8_t *>(bs->cur);
bs->cur += bs->page.compressed_page_size;
}
else
{
bs->cur = bs->end;
}
}
index_out = SHFL0(index_out);
if (index_out >= 0 && index_out < max_num_pages)
{
// NOTE: Assumes that sizeof(PageInfo) <= 128
if (t < sizeof(PageInfo) / sizeof(uint32_t))
{
((uint32_t *)(page_info + index_out))[t] = ((const uint32_t *)&bs->page)[t];
}
}
num_values = SHFL0(num_values);
SYNCWARP();
}
if (t == 0)
{
chunks[chunk].num_data_pages = data_page_count;
chunks[chunk].num_dict_pages = dictionary_page_count;
}
}
}
/**
* @brief Kernel for building dictionary index for the specified column chunks
*
* This function builds an index to point to each dictionary entry
* (string format is 4-byte little-endian string length followed by character
 * data). For each entry, the index stores a pointer to the string's character
 * data within the dictionary page together with its length (see nvstrdesc_s).
*
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
**/
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128)
gpuBuildStringDictionaryIndex(ColumnChunkDesc *chunks, int32_t num_chunks)
{
__shared__ ColumnChunkDesc chunk_g[4];
int t = threadIdx.x & 0x1f;
int chunk = (blockIdx.x << 2) + (threadIdx.x >> 5);
ColumnChunkDesc * const ck = &chunk_g[threadIdx.x >> 5];
if (chunk < num_chunks)
{
// NOTE: Assumes that sizeof(ColumnChunkDesc) <= 128
if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t))
{
((uint32_t *)ck)[t] = ((const uint32_t *)&chunks[chunk])[t];
}
}
__syncthreads();
if (chunk >= num_chunks)
{
return;
}
if (!t && ck->num_dict_pages > 0 && ck->str_dict_index)
{
// Data type to describe a string
nvstrdesc_s *dict_index = ck->str_dict_index;
const uint8_t *dict = ck->page_info[0].page_data;
int dict_size = ck->page_info[0].uncompressed_page_size;
int num_entries = ck->page_info[0].num_values;
int pos = 0, cur = 0;
for (int i = 0; i < num_entries; i++)
{
int len = 0;
if (cur + 4 <= dict_size)
{
len = dict[cur+0] | (dict[cur+1] << 8) | (dict[cur+2] << 16) | (dict[cur+3] << 24);
if (len >= 0 && cur + 4 + len <= dict_size)
{
pos = cur;
cur = cur + 4 + len;
}
else
{
cur = dict_size;
}
}
// TODO: Could store 8 entries in shared mem, then do a single warp-wide store
dict_index[i].ptr = (const char *)(dict + pos + 4);
dict_index[i].count = len;
}
}
}
hipError_t __host__ DecodePageHeaders(ColumnChunkDesc *chunks,
int32_t num_chunks,
hipStream_t stream) {
dim3 dim_block(128, 1);
dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block
hipLaunchKernelGGL(( gpuDecodePageHeaders), dim3(dim_grid), dim3(dim_block), 0, stream, chunks, num_chunks);
return hipSuccess;
}
hipError_t __host__ BuildStringDictionaryIndex(ColumnChunkDesc *chunks,
int32_t num_chunks,
hipStream_t stream) {
dim3 dim_block(128, 1);
dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block
hipLaunchKernelGGL(( gpuBuildStringDictionaryIndex), dim3(dim_grid), dim3(dim_block), 0, stream, chunks, num_chunks);
return hipSuccess;
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
| 519f7fd545fa4ca15627a5bbeb2c0af2cc8b00bf.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "parquet_gpu.h"
#if (__CUDACC_VER_MAJOR__ >= 9)
#define SHFL0(v) __shfl_sync(~0, v, 0)
#define SHFL(v, t) __shfl_sync(~0, v, t)
#define SYNCWARP() __syncwarp()
#define BALLOT(v) __ballot_sync(~0, v)
#else
#define SHFL0(v) __shfl(v, 0)
#define SHFL(v, t) __shfl(v, t)
#define SYNCWARP()
#define BALLOT(v) __ballot(v)
#endif
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
// Minimal thrift implementation for parsing page headers
enum {
ST_FLD_TRUE = 1,
ST_FLD_FALSE = 2,
ST_FLD_BYTE = 3,
ST_FLD_I16 = 4,
ST_FLD_I32 = 5,
ST_FLD_I64 = 6,
ST_FLD_DOUBLE = 7,
ST_FLD_BINARY = 8,
ST_FLD_LIST = 9,
ST_FLD_SET = 10,
ST_FLD_MAP = 11,
ST_FLD_STRUCT = 12,
};
static const __device__ __constant__ uint8_t g_list2struct[16] =
{
0, 1, 2, ST_FLD_BYTE,
ST_FLD_DOUBLE, 5, ST_FLD_I16, 7,
ST_FLD_I32, 9, ST_FLD_I64, ST_FLD_BINARY,
ST_FLD_STRUCT, ST_FLD_MAP, ST_FLD_SET, ST_FLD_LIST
};
struct byte_stream_s
{
const uint8_t *cur;
const uint8_t *end;
const uint8_t *base;
// Parsed symbols
PageType page_type;
PageInfo page;
ColumnChunkDesc ck;
};
inline __device__ unsigned int getb(byte_stream_s *bs)
{
return (bs->cur < bs->end) ? *bs->cur++ : 0;
}
inline __device__ void skip_bytes(byte_stream_s *bs, size_t bytecnt)
{
bytecnt = min(bytecnt, (size_t)(bs->end - bs->cur));
bs->cur += bytecnt;
}
__device__ uint32_t get_u32(byte_stream_s *bs)
{
uint32_t v = 0, l = 0, c;
do
{
c = getb(bs);
v |= (c & 0x7f) << l;
l += 7;
} while (c & 0x80);
return v;
}
inline __device__ int32_t get_i32(byte_stream_s *bs)
{
uint32_t u = get_u32(bs);
return (int32_t)((u >> 1u) ^ -(int32_t)(u & 1));
}
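// Skips over an unrecognized/unwanted Thrift field of type t, tracking nested
// structs via struct_depth and repeated list elements via rep_cnt.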
__device__ void skip_struct_field(byte_stream_s *bs, int t)
{
int struct_depth = 0;
int rep_cnt = 0;
do {
if (rep_cnt != 0)
{
rep_cnt--;
}
else if (struct_depth != 0)
{
int c;
do {
c = getb(bs);
if (!c)
--struct_depth;
} while (!c && struct_depth);
if (!struct_depth)
break;
t = c & 0xf;
if (!(c & 0xf0))
get_i32(bs);
}
switch (t)
{
case ST_FLD_TRUE:
case ST_FLD_FALSE:
break;
case ST_FLD_I16:
case ST_FLD_I32:
case ST_FLD_I64:
get_u32(bs);
break;
case ST_FLD_DOUBLE:
skip_bytes(bs, 8);
break;
case ST_FLD_BINARY:
skip_bytes(bs, get_u32(bs));
break;
case ST_FLD_LIST:
case ST_FLD_SET:
{ // NOTE: skipping a list of lists is not handled
int c = getb(bs);
int n = c >> 4;
if (n == 0xf)
n = get_u32(bs);
t = g_list2struct[c & 0xf];
if (t == ST_FLD_STRUCT)
struct_depth += n;
else
rep_cnt = n;
}
break;
case ST_FLD_STRUCT:
struct_depth++;
break;
}
} while (rep_cnt || struct_depth);
}
#define PARQUET_BEGIN_STRUCT(fn) \
 __device__ bool fn(byte_stream_s *bs) \
 { \
 int fld = 0; \
 for (;;) \
 { \
 int c, t, f; \
 c = getb(bs); \
 if (!c) \
 break; \
 f = c >> 4; \
 t = c & 0xf; \
 fld = (f) ? fld+f : get_i32(bs); \
 switch(fld) {
#define PARQUET_FLD_ENUM(id, m, mt) \
 case id: bs->m = (mt)get_i32(bs); if (t != ST_FLD_I32) return false; break;
#define PARQUET_FLD_INT32(id, m) \
 case id: bs->m = get_i32(bs); if (t != ST_FLD_I32) return false; break;
#define PARQUET_FLD_STRUCT(id, m) \
 case id: if (t != ST_FLD_STRUCT || !m(bs)) return false; break;
#define PARQUET_END_STRUCT() \
 default: \
 skip_struct_field(bs, t); \
 break; \
 } \
 } \
 return true; \
 }
PARQUET_BEGIN_STRUCT(gpuParseDataPageHeader)
PARQUET_FLD_INT32(1, page.num_values)
PARQUET_FLD_ENUM(2, page.encoding, Encoding);
PARQUET_FLD_ENUM(3, page.definition_level_encoding, Encoding);
PARQUET_FLD_ENUM(4, page.repetition_level_encoding, Encoding);
PARQUET_END_STRUCT()
PARQUET_BEGIN_STRUCT(gpuParseDictionaryPageHeader)
PARQUET_FLD_INT32(1, page.num_values)
PARQUET_FLD_ENUM(2, page.encoding, Encoding);
PARQUET_END_STRUCT()
PARQUET_BEGIN_STRUCT(gpuParseDataPageHeaderV2)
PARQUET_FLD_INT32(1, page.num_values)
PARQUET_FLD_INT32(3, page.num_rows)
PARQUET_FLD_ENUM(4, page.encoding, Encoding);
PARQUET_FLD_ENUM(5, page.definition_level_encoding, Encoding);
PARQUET_FLD_ENUM(6, page.repetition_level_encoding, Encoding);
PARQUET_END_STRUCT()
PARQUET_BEGIN_STRUCT(gpuParsePageHeader)
PARQUET_FLD_ENUM(1, page_type, PageType)
PARQUET_FLD_INT32(2, page.uncompressed_page_size)
PARQUET_FLD_INT32(3, page.compressed_page_size)
PARQUET_FLD_STRUCT(5, gpuParseDataPageHeader)
PARQUET_FLD_STRUCT(7, gpuParseDictionaryPageHeader)
PARQUET_FLD_STRUCT(8, gpuParseDataPageHeaderV2)
PARQUET_END_STRUCT()
/**
* @brief Kernel for outputting page headers from the specified column chunks
*
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
**/
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128)
gpuDecodePageHeaders(ColumnChunkDesc *chunks, int32_t num_chunks)
{
__shared__ byte_stream_s bs_g[4];
int t = threadIdx.x & 0x1f;
int chunk = (blockIdx.x << 2) + (threadIdx.x >> 5);
byte_stream_s * const bs = &bs_g[threadIdx.x >> 5];
if (chunk < num_chunks)
{
// NOTE: Assumes that sizeof(ColumnChunkDesc) <= 128
if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t))
{
((uint32_t *)&bs->ck)[t] = ((const uint32_t *)&chunks[chunk])[t];
}
}
__syncthreads();
if (chunk < num_chunks)
{
size_t num_values, values_found;
uint32_t data_page_count = 0;
uint32_t dictionary_page_count = 0;
int32_t max_num_pages;
int32_t num_dict_pages = bs->ck.num_dict_pages;
PageInfo *page_info;
if (!t)
{
bs->base = bs->cur = bs->ck.compressed_data;
bs->end = bs->base + bs->ck.compressed_size;
bs->page.chunk_idx = chunk;
bs->page.chunk_row = 0;
bs->page.num_rows = 0;
}
num_values = bs->ck.num_values;
page_info = bs->ck.page_info;
num_dict_pages = bs->ck.num_dict_pages;
max_num_pages = (page_info) ? bs->ck.max_num_pages : 0;
values_found = 0;
SYNCWARP();
while (values_found < num_values && bs->cur < bs->end)
{
int index_out = -1;
if (t == 0)
{
bs->page.chunk_row += bs->page.num_rows;
bs->page.num_rows = 0;
if (gpuParsePageHeader(bs) && bs->page.compressed_page_size >= 0)
{
switch (bs->page_type)
{
case DATA_PAGE:
// TODO: Unless the file only uses V2 page headers or has no complex nesting (num_rows == num_values), we can't infer num_rows at this time
// -> we'll need another pass after decompression to parse the definition and repetition levels to infer the correct value of num_rows
bs->page.num_rows = bs->page.num_values; // Assumes num_rows == num_values
// Fall-through to V2
case DATA_PAGE_V2:
index_out = num_dict_pages + data_page_count;
data_page_count++;
bs->page.flags = 0;
values_found += bs->page.num_values;
break;
case DICTIONARY_PAGE:
index_out = dictionary_page_count;
dictionary_page_count++;
bs->page.flags = PAGEINFO_FLAGS_DICTIONARY;
break;
default:
index_out = -1;
break;
}
bs->page.page_data = const_cast<uint8_t *>(bs->cur);
bs->cur += bs->page.compressed_page_size;
}
else
{
bs->cur = bs->end;
}
}
index_out = SHFL0(index_out);
if (index_out >= 0 && index_out < max_num_pages)
{
// NOTE: Assumes that sizeof(PageInfo) <= 128
if (t < sizeof(PageInfo) / sizeof(uint32_t))
{
((uint32_t *)(page_info + index_out))[t] = ((const uint32_t *)&bs->page)[t];
}
}
num_values = SHFL0(num_values);
SYNCWARP();
}
if (t == 0)
{
chunks[chunk].num_data_pages = data_page_count;
chunks[chunk].num_dict_pages = dictionary_page_count;
}
}
}
/**
* @brief Kernel for building dictionary index for the specified column chunks
*
* This function builds an index to point to each dictionary entry
* (string format is 4-byte little-endian string length followed by character
 * data). For each entry, the index stores a pointer to the string's character
 * data within the dictionary page together with its length (see nvstrdesc_s).
*
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
**/
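// Example (hypothetical PLAIN-encoded dictionary page holding "ab" and "xyz"):
//   bytes: 02 00 00 00 'a' 'b' 03 00 00 00 'x' 'y' 'z'
//   str_dict_index[0] = { ptr -> 'a', count = 2 }
//   str_dict_index[1] = { ptr -> 'x', count = 3 }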
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128)
gpuBuildStringDictionaryIndex(ColumnChunkDesc *chunks, int32_t num_chunks)
{
__shared__ ColumnChunkDesc chunk_g[4];
int t = threadIdx.x & 0x1f;
int chunk = (blockIdx.x << 2) + (threadIdx.x >> 5);
ColumnChunkDesc * const ck = &chunk_g[threadIdx.x >> 5];
if (chunk < num_chunks)
{
// NOTE: Assumes that sizeof(ColumnChunkDesc) <= 128
if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t))
{
((uint32_t *)ck)[t] = ((const uint32_t *)&chunks[chunk])[t];
}
}
__syncthreads();
if (chunk >= num_chunks)
{
return;
}
if (!t && ck->num_dict_pages > 0 && ck->str_dict_index)
{
// Data type to describe a string
nvstrdesc_s *dict_index = ck->str_dict_index;
const uint8_t *dict = ck->page_info[0].page_data;
int dict_size = ck->page_info[0].uncompressed_page_size;
int num_entries = ck->page_info[0].num_values;
int pos = 0, cur = 0;
for (int i = 0; i < num_entries; i++)
{
int len = 0;
if (cur + 4 <= dict_size)
{
len = dict[cur+0] | (dict[cur+1] << 8) | (dict[cur+2] << 16) | (dict[cur+3] << 24);
if (len >= 0 && cur + 4 + len <= dict_size)
{
pos = cur;
cur = cur + 4 + len;
}
else
{
cur = dict_size;
}
}
// TODO: Could store 8 entries in shared mem, then do a single warp-wide store
dict_index[i].ptr = (const char *)(dict + pos + 4);
dict_index[i].count = len;
}
}
}
cudaError_t __host__ DecodePageHeaders(ColumnChunkDesc *chunks,
int32_t num_chunks,
cudaStream_t stream) {
dim3 dim_block(128, 1);
dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block
gpuDecodePageHeaders<<<dim_grid, dim_block, 0, stream>>>(chunks, num_chunks);
return cudaSuccess;
}
cudaError_t __host__ BuildStringDictionaryIndex(ColumnChunkDesc *chunks,
int32_t num_chunks,
cudaStream_t stream) {
dim3 dim_block(128, 1);
dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block
gpuBuildStringDictionaryIndex<<<dim_grid, dim_block, 0, stream>>>(chunks, num_chunks);
return cudaSuccess;
}
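// Hypothetical call site (not part of this file): after the ColumnChunkDesc
// array has been copied to the device, a host-side reader would typically run
//   DecodePageHeaders(chunks_dev, num_chunks, stream);
//   BuildStringDictionaryIndex(chunks_dev, num_chunks, stream);
//   cudaStreamSynchronize(stream);
// before decompressing and decoding the individual pages.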
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
|
57fda215b72e2ef33db2ec3c0e90248219ffdd1e.hip | // !!! This is a file automatically generated by hipify!!!
/* -----------------------------------------------------------------------------
*
* Module : Sort
* Copyright : (c) [2009..2012] Trevor L. McDonell, Kevin Ying
* License : BSD
*
* ---------------------------------------------------------------------------*/
#include <stdint.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sort.h>
#include "algorithms.h"
#include "functors.h"
#include "utils.h"
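// Thin wrappers around thrust::sort_by_key: the raw device pointers passed in by
// the caller are adapted with thrust::device_ptr and sorted in place on the GPU.
// The *_rf variants sort keys in descending order; the *_idx variants first fill
// the index array with a sequence starting at `init`, so after the sort it holds
// the permutation that orders the keys.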
void sort_val_f(float *d_keys_raw, uint32_t *d_vals_raw, uint32_t N)
{
thrust::device_ptr<float> d_keys(d_keys_raw);
thrust::device_ptr<uint32_t> d_vals(d_vals_raw);
thrust::sort_by_key(d_keys, d_keys + N, d_vals);
}
void sort_idx_f(float *d_keys_raw, uint32_t *d_idx_raw, uint32_t N, uint32_t init)
{
thrust::device_ptr<float> d_keys(d_keys_raw);
thrust::device_ptr<uint32_t> d_idx(d_idx_raw);
thrust::sequence(d_idx, d_idx + N, init);
thrust::sort_by_key(d_keys, d_keys + N, d_idx);
}
void sort_idx_rf(float *d_keys_raw, uint32_t *d_idx_raw, uint32_t N, uint32_t init)
{
#ifdef _BENCH
hipDeviceSynchronize();
time_t t_beg, t_end;
time(&t_beg);
std::cerr << "sort_idx_rf" << std::endl;
#endif
thrust::device_ptr<float> d_keys(d_keys_raw);
thrust::device_ptr<uint32_t> d_idx(d_idx_raw);
thrust::sequence(d_idx, d_idx + N, init);
thrust::sort_by_key(d_keys, d_keys+N, d_idx, thrust::greater<float>());
#ifdef _BENCH
hipDeviceSynchronize();
time(&t_end);
std::cerr<< "Time elapsed for sort_idx_rf: " << difftime(t_end,t_beg) << " seconds" << std::endl;
#endif
}
void sort_rf(float *d_keys_raw, uint32_t *d_vals_raw, uint32_t N)
{
#ifdef _BENCH
hipDeviceSynchronize();
time_t t_beg, t_end;
time(&t_beg);
std::cerr << "sort_rf" << std::endl;
#endif
thrust::device_ptr<float> d_keys(d_keys_raw);
thrust::device_ptr<uint32_t> d_vals(d_vals_raw);
thrust::sort_by_key(d_keys, d_keys+N, d_vals, thrust::greater<float>());
#ifdef _BENCH
hipDeviceSynchronize();
time(&t_end);
std::cerr<< "Time elapsed for sort_rf: " << difftime(t_end,t_beg) << " seconds" << std::endl;
#endif
}
| 57fda215b72e2ef33db2ec3c0e90248219ffdd1e.cu | /* -----------------------------------------------------------------------------
*
* Module : Sort
* Copyright : (c) [2009..2012] Trevor L. McDonell, Kevin Ying
* License : BSD
*
* ---------------------------------------------------------------------------*/
#include <stdint.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sort.h>
#include "algorithms.h"
#include "functors.h"
#include "utils.h"
void sort_val_f(float *d_keys_raw, uint32_t *d_vals_raw, uint32_t N)
{
thrust::device_ptr<float> d_keys(d_keys_raw);
thrust::device_ptr<uint32_t> d_vals(d_vals_raw);
thrust::sort_by_key(d_keys, d_keys + N, d_vals);
}
void sort_idx_f(float *d_keys_raw, uint32_t *d_idx_raw, uint32_t N, uint32_t init)
{
thrust::device_ptr<float> d_keys(d_keys_raw);
thrust::device_ptr<uint32_t> d_idx(d_idx_raw);
thrust::sequence(d_idx, d_idx + N, init);
thrust::sort_by_key(d_keys, d_keys + N, d_idx);
}
void sort_idx_rf(float *d_keys_raw, uint32_t *d_idx_raw, uint32_t N, uint32_t init)
{
#ifdef _BENCH
cudaThreadSynchronize();
time_t t_beg, t_end;
time(&t_beg);
std::cerr << "sort_idx_rf" << std::endl;
#endif
thrust::device_ptr<float> d_keys(d_keys_raw);
thrust::device_ptr<uint32_t> d_idx(d_idx_raw);
thrust::sequence(d_idx, d_idx + N, init);
thrust::sort_by_key(d_keys, d_keys+N, d_idx, thrust::greater<float>());
#ifdef _BENCH
cudaThreadSynchronize();
time(&t_end);
std::cerr<< "Time elapsed for sort_idx_rf: " << difftime(t_end,t_beg) << " seconds" << std::endl;
#endif
}
void sort_rf(float *d_keys_raw, uint32_t *d_vals_raw, uint32_t N)
{
#ifdef _BENCH
cudaThreadSynchronize();
time_t t_beg, t_end;
time(&t_beg);
std::cerr << "sort_rf" << std::endl;
#endif
thrust::device_ptr<float> d_keys(d_keys_raw);
thrust::device_ptr<uint32_t> d_vals(d_vals_raw);
thrust::sort_by_key(d_keys, d_keys+N, d_vals, thrust::greater<float>());
#ifdef _BENCH
cudaThreadSynchronize();
time(&t_end);
std::cerr<< "Time elapsed for sort_rf: " << difftime(t_end,t_beg) << " seconds" << std::endl;
#endif
}
|
8072907ee55bc5e1445631564adb2895756fd94b.hip | // !!! This is a file automatically generated by hipify!!!
// Week 4
// cuSPARSE vs Kernel Testbench.
// [email protected]
// written by SukJoon Oh
#include <hip/hip_runtime.h>
// #include <device_launch_parameters.h>
#include <hipsparse.h> // hipsparseSpMV
#include <stdio.h>
#include <stdlib.h>
#include "mmio.h"
#include "errchk.cuh"
#include "spmv_kernel.cuh"
// Option settings
// #define COO
#define CSR
#define CUSPARSE
// #define SCALAR_KERNEL
// #define VECTOR_KERNEL
// ---- main() ----
// Entry
int main(int argc, char* argv[])
{
int test_iterations = 0;
int N = 0;
int M = 0;
int NZ = 0;
int* host_JR = NULL;
int* host_JC = NULL;
float* host_AA = NULL;
int* host_P = NULL;
int* device_JR = NULL;
int* device_JC = NULL;
float* device_AA = NULL;
float* device_AA_sorted = NULL;
int* device_P = NULL;
void* buffer = NULL;
size_t buffer_size = 0;
hipsparseHandle_t handle = NULL;
hipStream_t stream = NULL;
if (argc == 1 || argc == 2) { printf("Too few arguments.\nProgram exit.\n"); exit(0); }
 if (argc >= 4) { printf("Too many arguments.\nProgram exit.\n"); exit(0); }
test_iterations = atoi(argv[1]);
printf("(arg1) Target iterations: %d\n", test_iterations);
printf("(arg2) File name: %s\n", argv[2]);
//
// Reading file
{
FILE* MTX;
MTX = fopen(argv[2], "r");
MM_typecode matrix_code;
printf("Reading %s... \n", argv[2]);
// Read banner, type, etc essential infos
// Verification steps are ignored.
if (mm_read_banner(MTX, &matrix_code) != 0) exit(1);
mm_read_mtx_crd_size(MTX, &M, &N, &NZ); // Over max 1025
host_JR = (int*)malloc(NZ * sizeof(int));
host_JC = (int*)malloc(NZ * sizeof(int));
host_AA = (float*)malloc(NZ * sizeof(float));
host_P = (int*)malloc(NZ * sizeof(int));
for (register int i = 0; i < NZ; i++)
fscanf(MTX, "%d %d %f\n", &host_JR[i], &host_JC[i], &host_AA[i]);
fclose(MTX);
}
// ---- Step 1. Load info ----
printf("(File info)\tm : %d, n : %d, nz : %d\n", M, N, NZ);
printf("Printing samples...\n");
printf("JR: "); for (register int i = 0; i < 10; i++) printf("%6.0d", host_JR[i]); printf("\n");
printf("JC: "); for (register int i = 0; i < 10; i++) printf("%6.0d", host_JC[i]); printf("\n");
printf("AA: "); for (register int i = 0; i < 10; i++) printf("%6.0lf", host_AA[i]); printf("\n");
printf("File successfully loaded.\n");
// ---- Step 2. Handle create, bind a stream ----
printf("Preparing for cusparseXcoosort...\n");
CUDA_ERR(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
CUSPARSE_ERR(hipsparseCreate(&handle));
CUSPARSE_ERR(hipsparseSetStream(handle, stream));
// ---- Step 3. Allocate Buffer ----
CUSPARSE_ERR(hipsparseXcoosort_bufferSizeExt(handle, M, N, NZ, device_JR, device_JC, &buffer_size));
printf("Buffer allocation for hipsparseXcoosortByRow : %ld Byte\n", buffer_size);
CUDA_ERR(hipMalloc((void**)&device_JR, sizeof(int) * NZ));
CUDA_ERR(hipMalloc((void**)&device_JC, sizeof(int) * NZ));
CUDA_ERR(hipMalloc((void**)&device_P, sizeof(int) * NZ));
CUDA_ERR(hipMalloc((void**)&device_AA, sizeof(float) * NZ));
CUDA_ERR(hipMalloc((void**)&device_AA_sorted, sizeof(float) * NZ));
CUDA_ERR(hipMalloc((void**)&buffer, sizeof(char) * buffer_size));
CUDA_ERR(hipMemcpy(device_JR, host_JR, sizeof(int) * NZ, hipMemcpyHostToDevice));
CUDA_ERR(hipMemcpy(device_JC, host_JC, sizeof(int) * NZ, hipMemcpyHostToDevice));
CUDA_ERR(hipMemcpy(device_AA, host_AA, sizeof(float) * NZ, hipMemcpyHostToDevice));
CUDA_ERR(hipDeviceSynchronize());
printf("Allocation/Memcopy to GPU done.\n");
// ---- Step 4. Setup permutation vector P to Identity ----
CUSPARSE_ERR(hipsparseCreateIdentityPermutation(handle, NZ, device_P));
// ---- Step 5. Sort ----
CUSPARSE_ERR(hipsparseXcoosortByRow(handle, M, N, NZ, device_JR, device_JC, device_P, buffer));
printf("hipsparseXcoosortByRow done.\n");
// Gather
CUSPARSE_ERR(hipsparseSgthr(handle, NZ, device_AA, device_AA_sorted, device_P, HIPSPARSE_INDEX_BASE_ZERO));
CUDA_ERR(hipDeviceSynchronize());
// Fetch back
CUDA_ERR(hipMemcpy(host_JR, device_JR, sizeof(int) * NZ, hipMemcpyDeviceToHost));
CUDA_ERR(hipMemcpy(host_JC, device_JC, sizeof(int) * NZ, hipMemcpyDeviceToHost));
CUDA_ERR(hipMemcpy(host_P, device_P, sizeof(int) * NZ, hipMemcpyDeviceToHost));
CUDA_ERR(hipMemcpy(host_AA, device_AA_sorted, sizeof(float) * NZ, hipMemcpyDeviceToHost));
CUDA_ERR(hipDeviceSynchronize());
// Free memories
if (device_P) hipFree(device_P);
if (device_AA) hipFree(device_AA);
if (buffer) hipFree(buffer);
if (handle) hipsparseDestroy(handle);
if (stream) hipStreamDestroy(stream);
free(host_P); // Unnecessary
printf("Printing sorted values...\n");
printf("JR: "); for (register int i = 0; i < 10; i++) printf("%6.0d", host_JR[i]); printf("\n");
printf("JC: "); for (register int i = 0; i < 10; i++) printf("%6.0d", host_JC[i]); printf("\n");
printf("AA: "); for (register int i = 0; i < 10; i++) printf("%6.0lf", host_AA[i]); printf("\n");
#ifdef CSR
printf("Converting COO to CSR...\n");
if (device_JR) hipFree(device_JR);
if (device_JC) hipFree(device_JC);
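 // Build 1-based CSR row pointers from the row-sorted COO indices: start every
 // entry at 1, histogram the number of non-zeros per row (host_JR holds 1-based
 // row ids), then prefix-sum so t_JR[i] is the offset of row i's first element.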
int* t_JR = (int*)calloc((M + 1), sizeof(int));
for (int i = 0; i < M + 1; i++) t_JR[i]++;
for (int i = 0; i < NZ; i++) t_JR[host_JR[i]]++;
for (int i = 0; i < M; i++) t_JR[i + 1] += (t_JR[i] - 1);
free(host_JR);
host_JR = t_JR; // switch
printf("Done.\n");
printf("JR: "); for (register int i = 0; i < 10; i++) printf("%6.0d", host_JR[i]); printf("\n");
printf("JC: "); for (register int i = 0; i < 10; i++) printf("%6.0d", host_JC[i]); printf("\n");
printf("AA: "); for (register int i = 0; i < 10; i++) printf("%6.0lf", host_AA[i]); printf("\n");
#endif
{
handle = NULL;
buffer = NULL;
buffer_size = 0;
float average = 0;
float elapsed = 0;
hipEvent_t start, stop;
#ifdef CUSPARSE
printf("Test: CUSPARSE\n");
#else
printf("Test: KERNEL\n");
#endif
// ---- Step 7. Define variables
const float alpha = 1;
const float beta = 0;
 float host_y[N]; for (int i = 0; i < N; i++) host_y[i] = 0; // zero-initialize (a VLA cannot take a brace initializer)
float host_x[M];
for (auto& elem : host_x) elem = 1; // Set x to have all ones.
float* device_x = NULL;
float* device_y = NULL;
hipsparseSpMatDescr_t sp_mtx; // device
hipsparseDnVecDescr_t dn_x, dn_y; // device
CUSPARSE_ERR(hipsparseCreate(&handle));
CUDA_ERR(hipMalloc((void**)&device_x, sizeof(float) * M));
CUDA_ERR(hipMalloc((void**)&device_y, sizeof(float) * N));
CUDA_ERR(hipMemcpy(device_x, host_x, sizeof(float) * M, hipMemcpyHostToDevice));
CUDA_ERR(hipMemcpy(device_y, host_y, sizeof(float) * N, hipMemcpyHostToDevice));
#ifdef CSR
CUDA_ERR(hipMalloc((void**)&device_JR, sizeof(int) * (M + 1)));
CUDA_ERR(hipMalloc((void**)&device_JC, sizeof(int) * NZ));
CUDA_ERR(hipMemcpy(device_JR, host_JR, sizeof(int) * (M + 1), hipMemcpyHostToDevice));
CUDA_ERR(hipMemcpy(device_JC, host_JC, sizeof(int) * NZ, hipMemcpyHostToDevice));
#endif
#ifdef CUSPARSE
#ifndef CSR // when COO
CUSPARSE_ERR(hipsparseCreateCoo(&sp_mtx,
M, N, NZ, device_JR, device_JC, device_AA_sorted,
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ONE, HIP_R_32F));
#endif
#ifdef CSR
CUSPARSE_ERR(hipsparseCreateCsr(&sp_mtx,
M, N, NZ, device_JR, device_JC, device_AA_sorted,
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ONE, HIP_R_32F));
#endif
CUSPARSE_ERR(hipsparseCreateDnVec(&dn_x, N, device_x, HIP_R_32F));
CUSPARSE_ERR(hipsparseCreateDnVec(&dn_y, M, device_y, HIP_R_32F));
#ifndef CSR // when COO
CUSPARSE_ERR(hipsparseSpMV_bufferSize(
handle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, sp_mtx, dn_x, &beta, dn_y, HIP_R_32F,
HIPSPARSE_COOMV_ALG, &buffer_size));
#else // when CSR
CUSPARSE_ERR(hipsparseSpMV_bufferSize(
handle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, sp_mtx, dn_x, &beta, dn_y, HIP_R_32F,
HIPSPARSE_CSRMV_ALG1, &buffer_size));
#endif
CUDA_ERR(hipMalloc(&buffer, buffer_size));
#endif
printf("Iteration start.\n");
for (register int i = 0; i < test_iterations; i++) {
CUDA_ERR(hipMemcpy(device_y, host_y, sizeof(float) * N, hipMemcpyHostToDevice)); // set to all zeros
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start); // Timer start
// ---- Step 9. Do SpMV ----
#ifdef CUSPARSE
#ifndef CSR
CUSPARSE_ERR(hipsparseSpMV(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, sp_mtx, dn_x, &beta, dn_y, HIP_R_32F,
HIPSPARSE_COOMV_ALG, buffer));
#else
CUSPARSE_ERR(hipsparseSpMV(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, sp_mtx, dn_x, &beta, dn_y, HIP_R_32F,
HIPSPARSE_CSRMV_ALG1, buffer));
#endif
#else // Kernel function implementation
#ifdef SCALAR_KERNEL
// find the minimum block
int block_num = 1;
int thread_num = M;
if (M > 1024) {
while (block_num * 1024 < M) block_num++;
thread_num = 1024;
}
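 // block_num ends up as ceil(M / 1024); the scalar CSR kernel presumably assigns
 // one thread per matrix row, hence M threads in total.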
printf("Block %d, Threads %d\n", block_num, thread_num);
hipLaunchKernelGGL(( ker_csr_spmv_scalar), dim3(block_num), dim3(thread_num), 0, 0, device_JR, device_JC, device_AA_sorted, device_x, device_y);
#endif
#ifdef VECTOR_KERNEL
// find the minimum block
int block_num = 1;
int thread_num = M * 32;
if (M * 32 > 1024) {
while (block_num * 1024 < 32 * M) block_num++;
thread_num = 1024;
}
printf("Block %d, Threads %d\n", block_num, thread_num);
hipLaunchKernelGGL(( ker_csr_spmv_vector), dim3(block_num), dim3(thread_num), 0, 0, device_JR, device_JC, device_AA_sorted, device_x, device_y);
#endif
#endif
// Record
hipEventRecord(stop); // timer end
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
if (i != 0) printf(" Iter %3d, Cached, Elapsed: %fms\n", i + 1, elapsed);
else printf(" Iter %3d, Elapsed: %fms\n", i + 1, elapsed);
average += elapsed;
elapsed = 0;
}
printf("Iteration end.\n");
printf(" Average elapsed time: %lf\n", average / test_iterations);
// ---- Step 10. Fetch the result ----
CUDA_ERR(hipMemcpy(host_y, device_y, N * sizeof(float), hipMemcpyDeviceToHost));
printf("Host memory check...\nhost_y: "); for (int i = 0; i < 10; i++) printf("%9.1f", host_y[i]); printf("\n");
// ---- Step 11. Destroy ----
#ifdef CUSPARSE
CUSPARSE_ERR(hipsparseDestroySpMat(sp_mtx));
CUSPARSE_ERR(hipsparseDestroyDnVec(dn_x));
CUSPARSE_ERR(hipsparseDestroyDnVec(dn_y));
#endif
// ---- Step 12. Return resources ----
if (device_JR) hipFree(device_JR);
if (device_JC) hipFree(device_JC);
if (device_AA_sorted) hipFree(device_AA_sorted);
if (device_x) hipFree(device_x);
if (device_y) hipFree(device_y);
if (buffer) hipFree(buffer);
if (handle) hipsparseDestroy(handle);
hipEventDestroy(start);
hipEventDestroy(stop);
}
free(host_JR);
free(host_JC);
free(host_AA);
if (hipDeviceReset() != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
| 8072907ee55bc5e1445631564adb2895756fd94b.cu | // Week 4
// cuSPARSE vs Kernel Testbench.
// [email protected]
// written by SukJoon Oh
#include <cuda_runtime.h>
// #include <device_launch_parameters.h>
#include <cusparse.h> // cusparseSpMV
#include <stdio.h>
#include <stdlib.h>
#include "mmio.h"
#include "errchk.cuh"
#include "spmv_kernel.cuh"
// Option settings
// #define COO
#define CSR
#define CUSPARSE
// #define SCALAR_KERNEL
// #define VECTOR_KERNEL
// ---- main() ----
// Entry
int main(int argc, char* argv[])
{
int test_iterations = 0;
int N = 0;
int M = 0;
int NZ = 0;
int* host_JR = NULL;
int* host_JC = NULL;
float* host_AA = NULL;
int* host_P = NULL;
int* device_JR = NULL;
int* device_JC = NULL;
float* device_AA = NULL;
float* device_AA_sorted = NULL;
int* device_P = NULL;
void* buffer = NULL;
size_t buffer_size = 0;
cusparseHandle_t handle = NULL;
cudaStream_t stream = NULL;
if (argc == 1 || argc == 2) { printf("Too few arguments.\nProgram exit.\n"); exit(0); }
 if (argc >= 4) { printf("Too many arguments.\nProgram exit.\n"); exit(0); }
test_iterations = atoi(argv[1]);
printf("(arg1) Target iterations: %d\n", test_iterations);
printf("(arg2) File name: %s\n", argv[2]);
//
// Reading file
{
FILE* MTX;
MTX = fopen(argv[2], "r");
MM_typecode matrix_code;
printf("Reading %s... \n", argv[2]);
// Read banner, type, etc essential infos
// Verification steps are ignored.
if (mm_read_banner(MTX, &matrix_code) != 0) exit(1);
mm_read_mtx_crd_size(MTX, &M, &N, &NZ); // Over max 1025
host_JR = (int*)malloc(NZ * sizeof(int));
host_JC = (int*)malloc(NZ * sizeof(int));
host_AA = (float*)malloc(NZ * sizeof(float));
host_P = (int*)malloc(NZ * sizeof(int));
for (register int i = 0; i < NZ; i++)
fscanf(MTX, "%d %d %f\n", &host_JR[i], &host_JC[i], &host_AA[i]);
fclose(MTX);
}
// ---- Step 1. Load info ----
printf("(File info)\tm : %d, n : %d, nz : %d\n", M, N, NZ);
printf("Printing samples...\n");
printf("JR: "); for (register int i = 0; i < 10; i++) printf("%6.0d", host_JR[i]); printf("\n");
printf("JC: "); for (register int i = 0; i < 10; i++) printf("%6.0d", host_JC[i]); printf("\n");
printf("AA: "); for (register int i = 0; i < 10; i++) printf("%6.0lf", host_AA[i]); printf("\n");
printf("File successfully loaded.\n");
// ---- Step 2. Handle create, bind a stream ----
printf("Preparing for cusparseXcoosort...\n");
CUDA_ERR(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
CUSPARSE_ERR(cusparseCreate(&handle));
CUSPARSE_ERR(cusparseSetStream(handle, stream));
// ---- Step 3. Allocate Buffer ----
CUSPARSE_ERR(cusparseXcoosort_bufferSizeExt(handle, M, N, NZ, device_JR, device_JC, &buffer_size));
printf("Buffer allocation for cusparseXcoosortByRow : %ld Byte\n", buffer_size);
CUDA_ERR(cudaMalloc((void**)&device_JR, sizeof(int) * NZ));
CUDA_ERR(cudaMalloc((void**)&device_JC, sizeof(int) * NZ));
CUDA_ERR(cudaMalloc((void**)&device_P, sizeof(int) * NZ));
CUDA_ERR(cudaMalloc((void**)&device_AA, sizeof(float) * NZ));
CUDA_ERR(cudaMalloc((void**)&device_AA_sorted, sizeof(float) * NZ));
CUDA_ERR(cudaMalloc((void**)&buffer, sizeof(char) * buffer_size));
CUDA_ERR(cudaMemcpy(device_JR, host_JR, sizeof(int) * NZ, cudaMemcpyHostToDevice));
CUDA_ERR(cudaMemcpy(device_JC, host_JC, sizeof(int) * NZ, cudaMemcpyHostToDevice));
CUDA_ERR(cudaMemcpy(device_AA, host_AA, sizeof(float) * NZ, cudaMemcpyHostToDevice));
CUDA_ERR(cudaDeviceSynchronize());
printf("Allocation/Memcopy to GPU done.\n");
// ---- Step 4. Setup permutation vector P to Identity ----
CUSPARSE_ERR(cusparseCreateIdentityPermutation(handle, NZ, device_P));
// ---- Step 5. Sort ----
CUSPARSE_ERR(cusparseXcoosortByRow(handle, M, N, NZ, device_JR, device_JC, device_P, buffer));
printf("cusparseXcoosortByRow done.\n");
// Gather
CUSPARSE_ERR(cusparseSgthr(handle, NZ, device_AA, device_AA_sorted, device_P, CUSPARSE_INDEX_BASE_ZERO));
CUDA_ERR(cudaDeviceSynchronize());
// Fetch back
CUDA_ERR(cudaMemcpy(host_JR, device_JR, sizeof(int) * NZ, cudaMemcpyDeviceToHost));
CUDA_ERR(cudaMemcpy(host_JC, device_JC, sizeof(int) * NZ, cudaMemcpyDeviceToHost));
CUDA_ERR(cudaMemcpy(host_P, device_P, sizeof(int) * NZ, cudaMemcpyDeviceToHost));
CUDA_ERR(cudaMemcpy(host_AA, device_AA_sorted, sizeof(float) * NZ, cudaMemcpyDeviceToHost));
CUDA_ERR(cudaDeviceSynchronize());
// Free memories
if (device_P) cudaFree(device_P);
if (device_AA) cudaFree(device_AA);
if (buffer) cudaFree(buffer);
if (handle) cusparseDestroy(handle);
if (stream) cudaStreamDestroy(stream);
free(host_P); // Unnecessary
printf("Printing sorted values...\n");
printf("JR: "); for (register int i = 0; i < 10; i++) printf("%6.0d", host_JR[i]); printf("\n");
printf("JC: "); for (register int i = 0; i < 10; i++) printf("%6.0d", host_JC[i]); printf("\n");
printf("AA: "); for (register int i = 0; i < 10; i++) printf("%6.0lf", host_AA[i]); printf("\n");
#ifdef CSR
printf("Converting COO to CSR...\n");
if (device_JR) cudaFree(device_JR);
if (device_JC) cudaFree(device_JC);
int* t_JR = (int*)calloc((M + 1), sizeof(int));
for (int i = 0; i < M + 1; i++) t_JR[i]++;
for (int i = 0; i < NZ; i++) t_JR[host_JR[i]]++;
for (int i = 0; i < M; i++) t_JR[i + 1] += (t_JR[i] - 1);
free(host_JR);
host_JR = t_JR; // switch
printf("Done.\n");
printf("JR: "); for (register int i = 0; i < 10; i++) printf("%6.0d", host_JR[i]); printf("\n");
printf("JC: "); for (register int i = 0; i < 10; i++) printf("%6.0d", host_JC[i]); printf("\n");
printf("AA: "); for (register int i = 0; i < 10; i++) printf("%6.0lf", host_AA[i]); printf("\n");
#endif
{
handle = NULL;
buffer = NULL;
buffer_size = 0;
float average = 0;
float elapsed = 0;
cudaEvent_t start, stop;
#ifdef CUSPARSE
printf("Test: CUSPARSE\n");
#else
printf("Test: KERNEL\n");
#endif
// ---- Step 7. Define variables
const float alpha = 1;
const float beta = 0;
 float host_y[N]; for (int i = 0; i < N; i++) host_y[i] = 0; // zero-initialize (a VLA cannot take a brace initializer)
float host_x[M];
for (auto& elem : host_x) elem = 1; // Set x to have all ones.
float* device_x = NULL;
float* device_y = NULL;
cusparseSpMatDescr_t sp_mtx; // device
cusparseDnVecDescr_t dn_x, dn_y; // device
CUSPARSE_ERR(cusparseCreate(&handle));
CUDA_ERR(cudaMalloc((void**)&device_x, sizeof(float) * M));
CUDA_ERR(cudaMalloc((void**)&device_y, sizeof(float) * N));
CUDA_ERR(cudaMemcpy(device_x, host_x, sizeof(float) * M, cudaMemcpyHostToDevice));
CUDA_ERR(cudaMemcpy(device_y, host_y, sizeof(float) * N, cudaMemcpyHostToDevice));
#ifdef CSR
CUDA_ERR(cudaMalloc((void**)&device_JR, sizeof(int) * (M + 1)));
CUDA_ERR(cudaMalloc((void**)&device_JC, sizeof(int) * NZ));
CUDA_ERR(cudaMemcpy(device_JR, host_JR, sizeof(int) * (M + 1), cudaMemcpyHostToDevice));
CUDA_ERR(cudaMemcpy(device_JC, host_JC, sizeof(int) * NZ, cudaMemcpyHostToDevice));
#endif
#ifdef CUSPARSE
#ifndef CSR // when COO
CUSPARSE_ERR(cusparseCreateCoo(&sp_mtx,
M, N, NZ, device_JR, device_JC, device_AA_sorted,
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ONE, CUDA_R_32F));
#endif
#ifdef CSR
CUSPARSE_ERR(cusparseCreateCsr(&sp_mtx,
M, N, NZ, device_JR, device_JC, device_AA_sorted,
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ONE, CUDA_R_32F));
#endif
CUSPARSE_ERR(cusparseCreateDnVec(&dn_x, N, device_x, CUDA_R_32F));
CUSPARSE_ERR(cusparseCreateDnVec(&dn_y, M, device_y, CUDA_R_32F));
#ifndef CSR // when COO
CUSPARSE_ERR(cusparseSpMV_bufferSize(
handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, sp_mtx, dn_x, &beta, dn_y, CUDA_R_32F,
CUSPARSE_COOMV_ALG, &buffer_size));
#else // when CSR
CUSPARSE_ERR(cusparseSpMV_bufferSize(
handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, sp_mtx, dn_x, &beta, dn_y, CUDA_R_32F,
CUSPARSE_CSRMV_ALG1, &buffer_size));
#endif
CUDA_ERR(cudaMalloc(&buffer, buffer_size));
#endif
printf("Iteration start.\n");
for (register int i = 0; i < test_iterations; i++) {
CUDA_ERR(cudaMemcpy(device_y, host_y, sizeof(float) * N, cudaMemcpyHostToDevice)); // set to all zeros
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start); // Timer start
// ---- Step 9. Do SpMV ----
#ifdef CUSPARSE
#ifndef CSR
CUSPARSE_ERR(cusparseSpMV(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, sp_mtx, dn_x, &beta, dn_y, CUDA_R_32F,
CUSPARSE_COOMV_ALG, buffer));
#else
CUSPARSE_ERR(cusparseSpMV(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, sp_mtx, dn_x, &beta, dn_y, CUDA_R_32F,
CUSPARSE_CSRMV_ALG1, buffer));
#endif
#else // Kernel function implementation
#ifdef SCALAR_KERNEL
// find the minimum block
int block_num = 1;
int thread_num = M;
if (M > 1024) {
while (block_num * 1024 < M) block_num++;
thread_num = 1024;
}
printf("Block %d, Threads %d\n", block_num, thread_num);
ker_csr_spmv_scalar<<<block_num, thread_num>>>(device_JR, device_JC, device_AA_sorted, device_x, device_y);
#endif
#ifdef VECTOR_KERNEL
// find the minimum block
int block_num = 1;
int thread_num = M * 32;
if (M * 32 > 1024) {
while (block_num * 1024 < 32 * M) block_num++;
thread_num = 1024;
}
printf("Block %d, Threads %d\n", block_num, thread_num);
ker_csr_spmv_vector<<<block_num, thread_num>>>(device_JR, device_JC, device_AA_sorted, device_x, device_y);
#endif
#endif
// Record
cudaEventRecord(stop); // timer end
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
if (i != 0) printf(" Iter %3d, Cached, Elapsed: %fms\n", i + 1, elapsed);
else printf(" Iter %3d, Elapsed: %fms\n", i + 1, elapsed);
average += elapsed;
elapsed = 0;
}
printf("Iteration end.\n");
printf(" Average elapsed time: %lf\n", average / test_iterations);
// ---- Step 10. Fetch the result ----
CUDA_ERR(cudaMemcpy(host_y, device_y, N * sizeof(float), cudaMemcpyDeviceToHost));
printf("Host memory check...\nhost_y: "); for (int i = 0; i < 10; i++) printf("%9.1f", host_y[i]); printf("\n");
// ---- Step 11. Destroy ----
#ifdef CUSPARSE
CUSPARSE_ERR(cusparseDestroySpMat(sp_mtx));
CUSPARSE_ERR(cusparseDestroyDnVec(dn_x));
CUSPARSE_ERR(cusparseDestroyDnVec(dn_y));
#endif
// ---- Step 12. Return resources ----
if (device_JR) cudaFree(device_JR);
if (device_JC) cudaFree(device_JC);
if (device_AA_sorted) cudaFree(device_AA_sorted);
if (device_x) cudaFree(device_x);
if (device_y) cudaFree(device_y);
if (buffer) cudaFree(buffer);
if (handle) cusparseDestroy(handle);
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
free(host_JR);
free(host_JC);
free(host_AA);
if (cudaDeviceReset() != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
|
5ec62d81cac6225feae4c44d4fc8750104f46801.hip | // !!! This is a file automatically generated by hipify!!!
/*
Fractal v1.3: This code computes a fractal from the Mandelbrot set.
Copyright (c) 2015, Texas State University. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted for academic, research, experimental, or personal use provided
that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Texas State University nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
For all other uses, please contact the Office for Commercialization and Industry
Relations at Texas State University <http://www.txstate.edu/ocir/>.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher <[email protected]>
*/
#include <stdlib.h>
#include <stdio.h>
#include <malloc.h>
#include <assert.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#ifndef THREADS
#define THREADS 256 // threads per block; the kernel launch below requires this macro (override with -DTHREADS=... if desired)
#endif
#define delta 0.00578
#define xMin 0.74395
#define xMax (xMin + delta)
#define yMin 0.10461
#define yMax (yMin + delta)
static void WriteBMP(const int x, const int y, const unsigned char* const bmp, const char* const name)
{
const unsigned char bmphdr[54] = {66, 77, 255, 255, 255, 255, 0, 0, 0, 0, 54, 4, 0, 0, 40, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 1, 0, 8, 0, 0, 0, 0, 0, 255, 255, 255, 255, 196, 14, 0, 0, 196, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
unsigned char hdr[1078];
int i, j, c, xcorr, diff;
FILE* f;
xcorr = (x + 3) >> 2 << 2; // BMPs have to be a multiple of 4 pixels wide.
diff = xcorr - x;
for (i = 0; i < 54; i++) hdr[i] = bmphdr[i];
*((int*)(&hdr[18])) = xcorr;
*((int*)(&hdr[22])) = y;
*((int*)(&hdr[34])) = xcorr * y;
*((int*)(&hdr[2])) = xcorr * y + 1078;
for (i = 0; i < 256; i++) {
j = i * 4 + 54;
hdr[j+0] = i; // blue
hdr[j+1] = i; // green
hdr[j+2] = i; // red
hdr[j+3] = 0; // dummy
}
f = fopen(name, "wb");
assert(f != NULL);
c = fwrite(hdr, 1, 1078, f);
assert(c == 1078);
if (diff == 0) {
c = fwrite(bmp, 1, x * y, f);
assert(c == x * y);
} else {
*((int*)(&hdr[0])) = 0; // need up to three zero bytes
for (j = 0; j < y; j++) {
c = fwrite(&bmp[j * x], 1, x, f);
assert(c == x);
c = fwrite(hdr, 1, diff, f);
assert(c == diff);
}
}
fclose(f);
}
static __global__ void FractalKernel(const int width, const int maxdepth, const double dx, const double dy, unsigned char* const cnt_d)
{
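  // Each thread handles one pixel (idx = row * width + col) of the width x width
  // image, iterates the Mandelbrot recurrence until escape or the depth budget
  // runs out, and stores the remaining depth (mod 256) as the pixel's grey level.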
int idx, row, col, depth;
double cx, cy, x, y, x2, y2;
idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < width * width) {
col = idx % width;
row = idx / width;
cx = xMin + col * dx;
cy = yMin + row * dy;
x = -cx;
y = -cy;
depth = maxdepth;
do {
x2 = x * x;
y2 = y * y;
y = 2 * x * y - cy;
x = x2 - y2 - cx;
depth--;
} while ((depth > 0) && ((x2 + y2) <= 5.0));
cnt_d[row * width + col] = depth & 255;
}
}
int main(int argc, char *argv[])
{
int width, maxdepth;
double dx, dy;
unsigned char *cnt, *cnt_d;
struct timeval start, end;
printf("Fractal v1.3 [CUDA]\n");
// check command line
if (argc != 3) {fprintf(stderr, "usage: %s edge_length max_depth\n", argv[0]); exit(-1);}
width = atoi(argv[1]);
if (width < 10) {fprintf(stderr, "edge_length must be at least 10\n"); exit(-1);}
maxdepth = atoi(argv[2]);
if (maxdepth < 10) {fprintf(stderr, "max_depth must be at least 10\n"); exit(-1);}
printf("computing %d by %d fractal with a maximum depth of %d\n", width, width, maxdepth);
// allocate array
cnt = (unsigned char *)malloc(width * width * sizeof(unsigned char));
if (cnt == NULL) {fprintf(stderr, "could not allocate memory\n"); exit(-1);}
if (hipSuccess != hipMalloc((void **)&cnt_d, width * width * sizeof(unsigned char))) fprintf(stderr, "could not allocate array\n");
// start time
gettimeofday(&start, NULL);
// compute fractal
dx = (xMax - xMin) / width;
dy = (yMax - yMin) / width;
hipLaunchKernelGGL(( FractalKernel), dim3((width * width + THREADS - 1) / THREADS), dim3(THREADS), 0, 0, width, maxdepth, dx, dy, cnt_d);
if (hipSuccess != hipMemcpy(cnt, cnt_d, width * width * sizeof(unsigned char), hipMemcpyDeviceToHost)) fprintf(stderr, "copying from device failed\n");
// end time
gettimeofday(&end, NULL);
double runtime = end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0;
printf("compute time: %.4f s\n", runtime);
// verify result by writing it to a file
if (width <= 1024) {
WriteBMP(width, width, cnt, "fractal.bmp");
}
free(cnt);
hipFree(cnt_d);
return 0;
}
| 5ec62d81cac6225feae4c44d4fc8750104f46801.cu | /*
Fractal v1.3: This code computes a fractal from the Mandelbrot set.
Copyright (c) 2015, Texas State University. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted for academic, research, experimental, or personal use provided
that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Texas State University nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
For all other uses, please contact the Office for Commercialization and Industry
Relations at Texas State University <http://www.txstate.edu/ocir/>.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher <[email protected]>
*/
#include <stdlib.h>
#include <stdio.h>
#include <malloc.h>
#include <assert.h>
#include <sys/time.h>
#include <cuda.h>
#ifndef THREADS
#define THREADS 256 // threads per block; the kernel launch below requires this macro (override with -DTHREADS=... if desired)
#endif
#define delta 0.00578
#define xMin 0.74395
#define xMax (xMin + delta)
#define yMin 0.10461
#define yMax (yMin + delta)
static void WriteBMP(const int x, const int y, const unsigned char* const bmp, const char* const name)
{
const unsigned char bmphdr[54] = {66, 77, 255, 255, 255, 255, 0, 0, 0, 0, 54, 4, 0, 0, 40, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 1, 0, 8, 0, 0, 0, 0, 0, 255, 255, 255, 255, 196, 14, 0, 0, 196, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
unsigned char hdr[1078];
int i, j, c, xcorr, diff;
FILE* f;
xcorr = (x + 3) >> 2 << 2; // BMPs have to be a multiple of 4 pixels wide.
diff = xcorr - x;
for (i = 0; i < 54; i++) hdr[i] = bmphdr[i];
*((int*)(&hdr[18])) = xcorr;
*((int*)(&hdr[22])) = y;
*((int*)(&hdr[34])) = xcorr * y;
*((int*)(&hdr[2])) = xcorr * y + 1078;
for (i = 0; i < 256; i++) {
j = i * 4 + 54;
hdr[j+0] = i; // blue
hdr[j+1] = i; // green
hdr[j+2] = i; // red
hdr[j+3] = 0; // dummy
}
f = fopen(name, "wb");
assert(f != NULL);
c = fwrite(hdr, 1, 1078, f);
assert(c == 1078);
if (diff == 0) {
c = fwrite(bmp, 1, x * y, f);
assert(c == x * y);
} else {
*((int*)(&hdr[0])) = 0; // need up to three zero bytes
for (j = 0; j < y; j++) {
c = fwrite(&bmp[j * x], 1, x, f);
assert(c == x);
c = fwrite(hdr, 1, diff, f);
assert(c == diff);
}
}
fclose(f);
}
static __global__ void FractalKernel(const int width, const int maxdepth, const double dx, const double dy, unsigned char* const cnt_d)
{
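  // Each thread handles one pixel (idx = row * width + col) of the width x width
  // image, iterates the Mandelbrot recurrence until escape or the depth budget
  // runs out, and stores the remaining depth (mod 256) as the pixel's grey level.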
int idx, row, col, depth;
double cx, cy, x, y, x2, y2;
idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < width * width) {
col = idx % width;
row = idx / width;
cx = xMin + col * dx;
cy = yMin + row * dy;
x = -cx;
y = -cy;
depth = maxdepth;
do {
x2 = x * x;
y2 = y * y;
y = 2 * x * y - cy;
x = x2 - y2 - cx;
depth--;
} while ((depth > 0) && ((x2 + y2) <= 5.0));
cnt_d[row * width + col] = depth & 255;
}
}
int main(int argc, char *argv[])
{
int width, maxdepth;
double dx, dy;
unsigned char *cnt, *cnt_d;
struct timeval start, end;
printf("Fractal v1.3 [CUDA]\n");
// check command line
if (argc != 3) {fprintf(stderr, "usage: %s edge_length max_depth\n", argv[0]); exit(-1);}
width = atoi(argv[1]);
if (width < 10) {fprintf(stderr, "edge_length must be at least 10\n"); exit(-1);}
maxdepth = atoi(argv[2]);
if (maxdepth < 10) {fprintf(stderr, "max_depth must be at least 10\n"); exit(-1);}
printf("computing %d by %d fractal with a maximum depth of %d\n", width, width, maxdepth);
// allocate array
cnt = (unsigned char *)malloc(width * width * sizeof(unsigned char));
if (cnt == NULL) {fprintf(stderr, "could not allocate memory\n"); exit(-1);}
if (cudaSuccess != cudaMalloc((void **)&cnt_d, width * width * sizeof(unsigned char))) fprintf(stderr, "could not allocate array\n");
// start time
gettimeofday(&start, NULL);
// compute fractal
dx = (xMax - xMin) / width;
dy = (yMax - yMin) / width;
FractalKernel<<<(width * width + THREADS - 1) / THREADS, THREADS>>>(width, maxdepth, dx, dy, cnt_d);
if (cudaSuccess != cudaMemcpy(cnt, cnt_d, width * width * sizeof(unsigned char), cudaMemcpyDeviceToHost)) fprintf(stderr, "copying from device failed\n");
// end time
gettimeofday(&end, NULL);
double runtime = end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0;
printf("compute time: %.4f s\n", runtime);
// verify result by writing it to a file
if (width <= 1024) {
WriteBMP(width, width, cnt, "fractal.bmp");
}
free(cnt);
cudaFree(cnt_d);
return 0;
}
|
93bb86d8cabd8996821faf11e5debee6492f31a4.hip | // !!! This is a file automatically generated by hipify!!!
#include <bits/stdc++.h>
#include <hip/hip_runtime.h>
#include <highgui.h>
#include <cv.h>
#define Mask_size 3
//#define TILE_size_of_rgb 1024
#define BLOCKSIZE 32
#define TILE_SIZE 32
using namespace std;
using namespace cv;
__constant__ char Global_Mask[Mask_size*Mask_size];
__device__ unsigned char clamp(int value){
if(value < 0)
value = 0;
else
if(value > 255)
value = 255;
return value;
}
__global__ void sobelFilter(unsigned char *In, int Row, int Col, unsigned int Mask_Width,char *Mask,unsigned char *Out){
unsigned int row = blockIdx.y*blockDim.y+threadIdx.y;
unsigned int col = blockIdx.x*blockDim.x+threadIdx.x;
int Pvalue = 0;
int N_start_point_row = row - (Mask_Width/2);
int N_start_point_col = col - (Mask_Width/2);
for(int i = 0; i < Mask_Width; i++){
for(int j = 0; j < Mask_Width; j++ ){
if((N_start_point_col + j >=0 && N_start_point_col + j < Row)&&(N_start_point_row + i >=0 && N_start_point_row + i < Col)){
Pvalue += In[(N_start_point_row + i)*Row+(N_start_point_col + j)] * Mask[i*Mask_Width+j];
}
}
}
Out[row*Row+col] = clamp(Pvalue);
}
__global__ void sobelFilterConstant(unsigned char *In, int Row, int Col, unsigned int Mask_Width,unsigned char *Out){
unsigned int row = blockIdx.y*blockDim.y+threadIdx.y;
unsigned int col = blockIdx.x*blockDim.x+threadIdx.x;
int Pvalue = 0;
int N_start_point_row = row - (Mask_Width/2);
int N_start_point_col = col - (Mask_Width/2);
for(int i = 0; i < Mask_Width; i++){
for(int j = 0; j < Mask_Width; j++ ){
if((N_start_point_col + j >=0 && N_start_point_col + j < Row)&&(N_start_point_row + i >=0 && N_start_point_row + i < Col)){
Pvalue += In[(N_start_point_row + i)*Row+(N_start_point_col + j)] * Global_Mask[i*Mask_Width+j];
}
}
}
Out[row*Row+col] = clamp(Pvalue);
}
__global__ void sobelFilterShared(unsigned char *In, unsigned char *Out,int maskWidth, int width, int height){
__shared__ float N_ds[TILE_SIZE + Mask_size - 1][TILE_SIZE+ Mask_size - 1];
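  // The shared tile spans TILE_SIZE + Mask_size - 1 pixels per side (tile plus
  // halo), which is more elements than the TILE_SIZE x TILE_SIZE thread block has
  // threads, so the data is staged into shared memory in two batches below;
  // positions outside the image are filled with zero.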
int n = Mask_size/2;
int dest = threadIdx.y*TILE_SIZE+threadIdx.x, destY = dest / (TILE_SIZE+Mask_size-1), destX = dest % (TILE_SIZE+Mask_size-1),
srcY = blockIdx.y * TILE_SIZE + destY - n, srcX = blockIdx.x * TILE_SIZE + destX - n,
src = (srcY * width + srcX);
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = In[src];
else
N_ds[destY][destX] = 0;
// Second batch loading
dest = threadIdx.y * TILE_SIZE + threadIdx.x + TILE_SIZE * TILE_SIZE;
destY = dest /(TILE_SIZE + Mask_size - 1), destX = dest % (TILE_SIZE + Mask_size - 1);
srcY = blockIdx.y * TILE_SIZE + destY - n;
srcX = blockIdx.x * TILE_SIZE + destX - n;
src = (srcY * width + srcX);
if (destY < TILE_SIZE + Mask_size - 1) {
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = In[src];
else
N_ds[destY][destX] = 0;
}
__syncthreads();
int accum = 0;
int y, x;
for (y = 0; y < maskWidth; y++)
for (x = 0; x < maskWidth; x++)
accum += N_ds[threadIdx.y + y][threadIdx.x + x] * Global_Mask[y * maskWidth + x];
y = blockIdx.y * TILE_SIZE + threadIdx.y;
x = blockIdx.x * TILE_SIZE + threadIdx.x;
if (y < height && x < width)
Out[(y * width + x)] = clamp(accum);
__syncthreads();
}
__global__ void gray(unsigned char *In, unsigned char *Out,int Row, int Col){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if((row < Col) && (col < Row)){
Out[row*Row+col] = In[(row*Row+col)*3+2]*0.299 + In[(row*Row+col)*3+1]*0.587+ In[(row*Row+col)*3]*0.114;
}
}
// :::::::::::::::::::::::::::::::::::Clock Function::::::::::::::::::::::::::::
double diffclock(clock_t clock1,clock_t clock2){
double diffticks=clock2-clock1;
double diffms=(diffticks)/(CLOCKS_PER_SEC/1); // /1000 mili
return diffms;
}
void d_convolution2d(Mat image,unsigned char *In,unsigned char *h_Out,char *h_Mask,int Mask_Width,int Row,int Col,int op){
// Variables
int size_of_rgb = sizeof(unsigned char)*Row*Col*image.channels();
int size_of_Gray = sizeof(unsigned char)*Row*Col; // single channel, no extra color channels
int Mask_size_of_bytes = sizeof(char)*(Mask_size*Mask_size);
unsigned char *d_In,*d_Out,*d_sobelOut;
char *d_Mask;
float Blocksize=BLOCKSIZE;
// Memory Allocation in device
hipMalloc((void**)&d_In,size_of_rgb);
hipMalloc((void**)&d_Out,size_of_Gray);
hipMalloc((void**)&d_Mask,Mask_size_of_bytes);
hipMalloc((void**)&d_sobelOut,size_of_Gray);
// Memcpy Host to device
hipMemcpy(d_In,In,size_of_rgb, hipMemcpyHostToDevice);
hipMemcpy(d_Mask,h_Mask,Mask_size_of_bytes,hipMemcpyHostToDevice);
hipMemcpyToSymbol(Global_Mask,h_Mask,Mask_size_of_bytes); // copy the mask into __constant__ memory (Global_Mask)
// Thread logic and Kernel call
dim3 dimGrid(ceil(Row/Blocksize),ceil(Col/Blocksize),1);
dim3 dimBlock(Blocksize,Blocksize,1);
hipLaunchKernelGGL(( gray), dim3(dimGrid),dim3(dimBlock), 0, 0, d_In,d_Out,Row,Col); // convert the input to grayscale
hipDeviceSynchronize();
if(op==1){
hipLaunchKernelGGL(( sobelFilter), dim3(dimGrid),dim3(dimBlock), 0, 0, d_Out,Row,Col,Mask_size,d_Mask,d_sobelOut);
}
if(op==2){
hipLaunchKernelGGL(( sobelFilterConstant), dim3(dimGrid),dim3(dimBlock), 0, 0, d_Out,Row,Col,Mask_size,d_sobelOut);
}
if(op==3){
hipLaunchKernelGGL(( sobelFilterShared), dim3(dimGrid),dim3(dimBlock), 0, 0, d_Out,d_sobelOut,3,Row,Col);
}
// save output result.
hipMemcpy (h_Out,d_sobelOut,size_of_Gray,hipMemcpyDeviceToHost);
// Free device memory
hipFree(d_In);
hipFree(d_Out);
hipFree(d_Mask);
hipFree(d_sobelOut);
}
int main(){
double T1,T2; // Time flags
clock_t start,end;// Time flags
int Mask_Width = Mask_size;
char h_Mask[] = {-1,0,1,-2,0,2,-1,0,1};
Mat image,result_image;
image = imread("inputs/img1.jpg",1);
Size s = image.size();
int Row = s.width;
int Col = s.height;
unsigned char * In = (unsigned char*)malloc( sizeof(unsigned char)*Row*Col*image.channels());
unsigned char * h_Out = (unsigned char *)malloc( sizeof(unsigned char)*Row*Col);
In = image.data;
start = clock();
d_convolution2d(image,In,h_Out,h_Mask,Mask_Width,Row,Col,3);
end = clock();
T1=diffclock(start,end);
cout<<" Result Parallel"<<" At "<<T1<<",Seconds"<<endl;
Mat gray_image_opencv, grad_x, abs_grad_x;
start = clock();
cvtColor(image, gray_image_opencv, CV_BGR2GRAY);
Sobel(gray_image_opencv,grad_x,CV_8UC1,1,0,3,1,0,BORDER_DEFAULT);
convertScaleAbs(grad_x, abs_grad_x);
end = clock();
T2=diffclock(start,end);
cout<<" Result secuential"<<" At "<<T2<<",Seconds"<<endl;
cout<<"Total acceleration "<<T2/T1<<"X"<<endl;
result_image.create(Col,Row,CV_8UC1);
result_image.data = h_Out;
imwrite("./outputs/1088015148.png",result_image);
return 0;
}
| 93bb86d8cabd8996821faf11e5debee6492f31a4.cu | #include <bits/stdc++.h>
#include <cuda.h>
#include <highgui.h>
#include <cv.h>
#define Mask_size 3
//#define TILE_size_of_rgb 1024
#define BLOCKSIZE 32
#define TILE_SIZE 32
using namespace std;
using namespace cv;
__constant__ char Global_Mask[Mask_size*Mask_size];
__device__ unsigned char clamp(int value){
if(value < 0)
value = 0;
else
if(value > 255)
value = 255;
return value;
}
__global__ void sobelFilter(unsigned char *In, int Row, int Col, unsigned int Mask_Width,char *Mask,unsigned char *Out){
unsigned int row = blockIdx.y*blockDim.y+threadIdx.y;
unsigned int col = blockIdx.x*blockDim.x+threadIdx.x;
int Pvalue = 0;
int N_start_point_row = row - (Mask_Width/2);
int N_start_point_col = col - (Mask_Width/2);
for(int i = 0; i < Mask_Width; i++){
for(int j = 0; j < Mask_Width; j++ ){
if((N_start_point_col + j >=0 && N_start_point_col + j < Row)&&(N_start_point_row + i >=0 && N_start_point_row + i < Col)){
Pvalue += In[(N_start_point_row + i)*Row+(N_start_point_col + j)] * Mask[i*Mask_Width+j];
}
}
}
Out[row*Row+col] = clamp(Pvalue);
}
__global__ void sobelFilterConstant(unsigned char *In, int Row, int Col, unsigned int Mask_Width,unsigned char *Out){
unsigned int row = blockIdx.y*blockDim.y+threadIdx.y;
unsigned int col = blockIdx.x*blockDim.x+threadIdx.x;
int Pvalue = 0;
int N_start_point_row = row - (Mask_Width/2);
int N_start_point_col = col - (Mask_Width/2);
for(int i = 0; i < Mask_Width; i++){
for(int j = 0; j < Mask_Width; j++ ){
if((N_start_point_col + j >=0 && N_start_point_col + j < Row)&&(N_start_point_row + i >=0 && N_start_point_row + i < Col)){
Pvalue += In[(N_start_point_row + i)*Row+(N_start_point_col + j)] * Global_Mask[i*Mask_Width+j];
}
}
}
Out[row*Row+col] = clamp(Pvalue);
}
__global__ void sobelFilterShared(unsigned char *In, unsigned char *Out,int maskWidth, int width, int height){
__shared__ float N_ds[TILE_SIZE + Mask_size - 1][TILE_SIZE+ Mask_size - 1];
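  // The shared tile spans TILE_SIZE + Mask_size - 1 pixels per side (tile plus
  // halo), which is more elements than the TILE_SIZE x TILE_SIZE thread block has
  // threads, so the data is staged into shared memory in two batches below;
  // positions outside the image are filled with zero.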
int n = Mask_size/2;
int dest = threadIdx.y*TILE_SIZE+threadIdx.x, destY = dest / (TILE_SIZE+Mask_size-1), destX = dest % (TILE_SIZE+Mask_size-1),
srcY = blockIdx.y * TILE_SIZE + destY - n, srcX = blockIdx.x * TILE_SIZE + destX - n,
src = (srcY * width + srcX);
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = In[src];
else
N_ds[destY][destX] = 0;
// Second batch loading
dest = threadIdx.y * TILE_SIZE + threadIdx.x + TILE_SIZE * TILE_SIZE;
destY = dest /(TILE_SIZE + Mask_size - 1), destX = dest % (TILE_SIZE + Mask_size - 1);
srcY = blockIdx.y * TILE_SIZE + destY - n;
srcX = blockIdx.x * TILE_SIZE + destX - n;
src = (srcY * width + srcX);
if (destY < TILE_SIZE + Mask_size - 1) {
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = In[src];
else
N_ds[destY][destX] = 0;
}
__syncthreads();
int accum = 0;
int y, x;
for (y = 0; y < maskWidth; y++)
for (x = 0; x < maskWidth; x++)
accum += N_ds[threadIdx.y + y][threadIdx.x + x] * Global_Mask[y * maskWidth + x];
y = blockIdx.y * TILE_SIZE + threadIdx.y;
x = blockIdx.x * TILE_SIZE + threadIdx.x;
if (y < height && x < width)
Out[(y * width + x)] = clamp(accum);
__syncthreads();
}
__global__ void gray(unsigned char *In, unsigned char *Out,int Row, int Col){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if((row < Col) && (col < Row)){
Out[row*Row+col] = In[(row*Row+col)*3+2]*0.299 + In[(row*Row+col)*3+1]*0.587+ In[(row*Row+col)*3]*0.114;
}
}
// :::::::::::::::::::::::::::::::::::Clock Function::::::::::::::::::::::::::::
double diffclock(clock_t clock1,clock_t clock2){
double diffticks=clock2-clock1;
double diffms=(diffticks)/(CLOCKS_PER_SEC/1); // /1000 mili
return diffms;
}
void d_convolution2d(Mat image,unsigned char *In,unsigned char *h_Out,char *h_Mask,int Mask_Width,int Row,int Col,int op){
// Variables
int size_of_rgb = sizeof(unsigned char)*Row*Col*image.channels();
int size_of_Gray = sizeof(unsigned char)*Row*Col; // single channel, no extra color channels
int Mask_size_of_bytes = sizeof(char)*(Mask_size*Mask_size);
unsigned char *d_In,*d_Out,*d_sobelOut;
char *d_Mask;
float Blocksize=BLOCKSIZE;
// Memory Allocation in device
cudaMalloc((void**)&d_In,size_of_rgb);
cudaMalloc((void**)&d_Out,size_of_Gray);
cudaMalloc((void**)&d_Mask,Mask_size_of_bytes);
cudaMalloc((void**)&d_sobelOut,size_of_Gray);
// Memcpy Host to device
cudaMemcpy(d_In,In,size_of_rgb, cudaMemcpyHostToDevice);
cudaMemcpy(d_Mask,h_Mask,Mask_size_of_bytes,cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(Global_Mask,h_Mask,Mask_size_of_bytes); // copy the mask into __constant__ memory (Global_Mask)
// Thread logic and Kernel call
dim3 dimGrid(ceil(Row/Blocksize),ceil(Col/Blocksize),1);
dim3 dimBlock(Blocksize,Blocksize,1);
gray<<<dimGrid,dimBlock>>>(d_In,d_Out,Row,Col); // convert the input to grayscale
cudaDeviceSynchronize();
if(op==1){
sobelFilter<<<dimGrid,dimBlock>>>(d_Out,Row,Col,Mask_size,d_Mask,d_sobelOut);
}
if(op==2){
sobelFilterConstant<<<dimGrid,dimBlock>>>(d_Out,Row,Col,Mask_size,d_sobelOut);
}
if(op==3){
sobelFilterShared<<<dimGrid,dimBlock>>>(d_Out,d_sobelOut,3,Row,Col);
}
// save output result.
cudaMemcpy (h_Out,d_sobelOut,size_of_Gray,cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_In);
cudaFree(d_Out);
cudaFree(d_Mask);
cudaFree(d_sobelOut);
}
int main(){
double T1,T2; // Time flags
clock_t start,end;// Time flags
int Mask_Width = Mask_size;
char h_Mask[] = {-1,0,1,-2,0,2,-1,0,1};
Mat image,result_image;
image = imread("inputs/img1.jpg",1);
Size s = image.size();
int Row = s.width;
int Col = s.height;
unsigned char * In = (unsigned char*)malloc( sizeof(unsigned char)*Row*Col*image.channels());
unsigned char * h_Out = (unsigned char *)malloc( sizeof(unsigned char)*Row*Col);
In = image.data;
start = clock();
d_convolution2d(image,In,h_Out,h_Mask,Mask_Width,Row,Col,3);
end = clock();
T1=diffclock(start,end);
cout<<" Result Parallel"<<" At "<<T1<<",Seconds"<<endl;
Mat gray_image_opencv, grad_x, abs_grad_x;
start = clock();
cvtColor(image, gray_image_opencv, CV_BGR2GRAY);
Sobel(gray_image_opencv,grad_x,CV_8UC1,1,0,3,1,0,BORDER_DEFAULT);
convertScaleAbs(grad_x, abs_grad_x);
end = clock();
T2=diffclock(start,end);
cout<<" Result secuential"<<" At "<<T2<<",Seconds"<<endl;
cout<<"Total acceleration "<<T2/T1<<"X"<<endl;
result_image.create(Col,Row,CV_8UC1);
result_image.data = h_Out;
imwrite("./outputs/1088015148.png",result_image);
return 0;
}
|
66f6ff27f210dc2d9ed225b7305846f0ec61f648.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// DeconvSingleInputExecution.cpp
// MNN
//
// Created by MNN on 2022/03/04.
// Copyright © 2018, Alibaba Group Holding Limited
//
#include "DeconvSingleInputExecution.hpp"
#include "MNNCUDADefine.hpp"
#include "MNNCUDAFunction.cuh"
namespace MNN {
namespace CUDA {
__global__ void DeconvKernelReorder(const float* B, half* BP, int kw, int kh, int ic, int oc, int icPack) {
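    // Repacks the deconvolution weights from their [ic][oc][kh*kw] float layout
    // into half-precision 16x16 (MATMULPACK) tiles expected by the packed GEMM;
    // positions past the real extents e = oc*kh*kw or l = ic are zero-filled.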
int kernelCount = kw * kh;
int e = oc * kernelCount;
int l = ic;
int eDiv = UP_DIV(e, MATMULPACK);
int eAlign = eDiv * MATMULPACK;
int lDiv = UP_DIV(l, icPack);
int lAlign = lDiv * icPack;
int maxCount = eAlign * lAlign;
for (size_t indexO = blockIdx.x * blockDim.x + threadIdx.x; indexO < maxCount; indexO += blockDim.x * gridDim.x) {
int lR = indexO % icPack;
int tmp = indexO / icPack;
int eR = tmp % MATMULPACK;
int tmp2 = tmp / MATMULPACK;
int lC = tmp2 % lDiv;
int eC = tmp2 / lDiv;
half* dst = BP + indexO;
int sL = lC * icPack + lR;//ic_idx
int sE = eC * MATMULPACK + eR;
if (sL >= ic) {
*dst = 0.0;
continue;
}
int oEC = sE / (kernelCount);//oc_idx
int oEk = sE % kernelCount;//khw_idx
if (sE >= e) {
*dst = 0.0;
continue;
}
const float* src = B + sL * kernelCount * oc + oEk + oEC * kernelCount;
*dst = *src;
}
}
__global__ void DeconvInputRerange(const int count,
const InputReorderParameter* param,
const float* Inp,
__half* InpRe
) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
int l = 16 * param->lpack_size;
int h = 16 * param->hpack_size;
int lIndex = i % l;
int hIndex = i / l;
int lU = lIndex / 16;
int lR = lIndex % 16;
int hU = hIndex / 16;
int hR = hIndex % 16;
__half* dst = InpRe + hU * param->lpack_size * 16 * 16 + lU * 16 * 16 + lR + hR * 16;
if(hIndex >= param->h_size) {
dst[0] = (__half)0.0;
break;
}
float value = Inp[(lU*param->h_size + hIndex) * 16 + lR];
dst[0] = value;
}
}
template <typename Dtype>
__global__ void Col2Im(const int n, const Dtype* data_col,
const int batch, const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
const Dtype* bias, Dtype* data_im
) {
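  // Inverse of im2col: each thread produces one element of the 16-channel-packed
  // output image (layout C/16, N, H, W, 16) by summing every im2col column that
  // covers this pixel and adding the per-channel bias when one is given.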
const int channel_pack = ((channels+15) / 16);
for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < (n); index += blockDim.x * gridDim.x) {
Dtype val = 0;
const int c_p = index / (batch * width * height * 16);
const int idx_tmp = index % (batch * width * height * 16);
const int b_im = idx_tmp / (width * height * 16);
const int hw_16 = idx_tmp % (width * height * 16);
const int c_l = hw_16 % 16;
const int c_im = c_p * 16 + c_l;
const int hw = hw_16 / 16;
const int w_im = hw % width + pad_w;
const int h_im = hw / width + pad_h;
if(c_im >= channels) {
data_im[index] = val;
break;
}
if(nullptr != bias) {
val += bias[c_im];
}
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int data_col_index = ((((c_im * kernel_h + h_k) * kernel_w + w_k) * batch + b_im) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
DeconvSingleInputExecution::Resource::Resource(Backend* bn, const MNN::Op* op) {
mBackend = bn;
auto runtime = static_cast<CUDABackend*>(bn)->getCUDARuntime();
auto conv = op->main_as_Convolution2D();
auto common = conv->common();
mKernelInfo.kernelX = common->kernelX();
mKernelInfo.kernelY = common->kernelY();
mKernelInfo.groups = common->group();
mKernelInfo.strideX = common->strideX();
mKernelInfo.strideY = common->strideY();
mKernelInfo.dilateX = common->dilateX();
mKernelInfo.dilateY = common->dilateY();
mKernelInfo.activationType = common->relu() ? 1 : (common->relu6() ? 2 : 0);
//weight host->device
const float* filterDataPtr = nullptr;
int weightSize = 0;
std::shared_ptr<ConvolutionCommon::Int8Common> quanCommon;
ConvolutionCommon::getConvParameters(&quanCommon, conv, &filterDataPtr, &weightSize);
mKernelInfo.kernelN = common->outputCount();
mKernelInfo.kernelC = weightSize / mKernelInfo.kernelN / mKernelInfo.kernelX / mKernelInfo.kernelY;
MatMulParam param;
int e = mKernelInfo.kernelN * mKernelInfo.kernelX * mKernelInfo.kernelY;
int l = mKernelInfo.kernelC;
int h = 0;
param.elh[0] = e;
param.elh[1] = l;
param.elh[2] = h;
param.elhPack[0] = UP_DIV(e, 16);
param.elhPack[1] = UP_DIV(l, 16);
param.elhPack[2] = UP_DIV(h, 16);
param.aStride[0] = 1;
param.aStride[1] = e;
param.aStride[2] = 0;
param.bStride[0] = 0;
param.bStride[1] = h;
param.bStride[2] = 1;
auto gpuParam = static_cast<CUDABackend*>(bn)->getStaticBufferPool()->alloc(sizeof(MatMulParam));
auto tempCacheBuffer = static_cast<CUDABackend*>(bn)->getStaticBufferPool()->alloc(weightSize * sizeof(float));
float* cacheWeight = (float*)((uint8_t*)tempCacheBuffer.first + tempCacheBuffer.second);
runtime->memcpy(cacheWeight, filterDataPtr, weightSize * sizeof(float), MNNMemcpyHostToDevice);
runtime->memcpy((uint8_t*)gpuParam.first + gpuParam.second, ¶m, sizeof(MatMulParam), MNNMemcpyHostToDevice);
// Reorder weight
weightTensor.reset(Tensor::createDevice<int16_t>({param.elhPack[0] * param.elhPack[1] * (MATMULPACK * MATMULPACK)}));
bn->onAcquireBuffer(weightTensor.get(), Backend::STATIC);
mFilter = (void *)weightTensor.get()->buffer().device;
auto& prop = runtime->prop();
int cores = prop.multiProcessorCount;
int threadNumbers = prop.maxThreadsPerBlock;
hipLaunchKernelGGL(( DeconvKernelReorder), dim3(cores), dim3(threadNumbers), 0, 0, (float*)cacheWeight, (half*)mFilter,
mKernelInfo.kernelX, mKernelInfo.kernelY, mKernelInfo.kernelC, mKernelInfo.kernelN, MATMULPACK);
static_cast<CUDABackend*>(bn)->getStaticBufferPool()->free(tempCacheBuffer);
static_cast<CUDABackend*>(bn)->getStaticBufferPool()->free(gpuParam);
// Copy Bias
int biasSize = conv->bias()->size();
biasTensor.reset(Tensor::createDevice<float>({biasSize}));
bn->onAcquireBuffer(biasTensor.get(), Backend::STATIC);
mBias = (void *)biasTensor.get()->buffer().device;
cuda_check(hipMemcpy(mBias, conv->bias()->data(), conv->bias()->size()*sizeof(float), hipMemcpyHostToDevice));
}
DeconvSingleInputExecution::Resource::~Resource() {
// Do nothing
}
DeconvSingleInputExecution::DeconvSingleInputExecution(Backend* backend, const MNN::Op* op, std::shared_ptr<Resource> res) : Execution(backend), mOp(op) {
mResource = res;
auto runtime = static_cast<CUDABackend*>(backend)->getCUDARuntime();
auto staticPool = static_cast<CUDABackend*>(backend)->getStaticBufferPool();
mGpuMatMulParam = staticPool->alloc(sizeof(MatMulParam));
mGpuCol2ImParam = staticPool->alloc(sizeof(Col2ImParameter));
mGpuInpReorderParam = staticPool->alloc(sizeof(InputReorderParameter));
}
DeconvSingleInputExecution::~DeconvSingleInputExecution() {
auto staticPool = static_cast<CUDABackend*>(backend())->getStaticBufferPool();
staticPool->free(mGpuMatMulParam);
staticPool->free(mGpuCol2ImParam);
staticPool->free(mGpuInpReorderParam);
}
bool DeconvSingleInputExecution::onClone(Backend* bn, const Op* op, Execution** dst) {
if (!mValid) {
return false;
}
if (nullptr == dst) {
return true;
}
auto dstExe = new DeconvSingleInputExecution(bn, op, mResource);
*dst = dstExe;
return true;
}
ErrorCode DeconvSingleInputExecution::onResize(const std::vector<Tensor*> &inputs, const std::vector<Tensor*> &outputs) {
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
auto input = inputs[0], output = outputs[0];
const int UNIT = 1;
auto convCommon = mOp->main_as_Convolution2D()->common();
// Input Rerange Param
mInpReorderParameter.hw_size = input->height() * input->width();
mInpReorderParameter.ic_stride = mInpReorderParameter.hw_size;
mInpReorderParameter.ib_stride = mInpReorderParameter.hw_size * input->channel();
mInpReorderParameter.oc_stride = mInpReorderParameter.ib_stride;
mInpReorderParameter.ob_stride = mInpReorderParameter.hw_size;
mInpReorderParameter.l_size = input->channel();
mInpReorderParameter.h_size = input->batch() * mInpReorderParameter.hw_size;
mInpReorderParameter.lpack_size = UP_DIV(mInpReorderParameter.l_size, 16);
mInpReorderParameter.hpack_size = UP_DIV(mInpReorderParameter.h_size, 16);
runtime->memcpy((uint8_t*)mGpuInpReorderParam.first + mGpuInpReorderParam.second, &mInpReorderParameter, sizeof(InputReorderParameter), MNNMemcpyHostToDevice);
// Col2Im Param
auto pad = ConvolutionCommon::convolutionTransposePad(input, output, mOp->main_as_Convolution2D()->common());
mCol2ImParamter.dilateX = convCommon->dilateX();
mCol2ImParamter.dilateY = convCommon->dilateY();
mCol2ImParamter.strideX = convCommon->strideX();
mCol2ImParamter.strideY = convCommon->strideY();
mCol2ImParamter.ic = input->channel();
mCol2ImParamter.oc = output->channel();
mCol2ImParamter.kernelX = convCommon->kernelX();
mCol2ImParamter.kernelY = convCommon->kernelY();
mCol2ImParamter.padX = pad.first;
mCol2ImParamter.padY = pad.second;
mCol2ImParamter.ih = input->height();
mCol2ImParamter.iw = input->width();
mCol2ImParamter.oh = output->height();
mCol2ImParamter.ow = output->width();
mCol2ImParamter.ob = output->batch();
runtime->memcpy((uint8_t*)mGpuCol2ImParam.first + mGpuCol2ImParam.second, &mCol2ImParamter, sizeof(Col2ImParameter), MNNMemcpyHostToDevice);
// Matmul Param
int e = output->channel() * mCol2ImParamter.kernelX * mCol2ImParamter.kernelY;
int l = input->channel();
int h = input->height() * input->width() * output->batch();
mMatMulParam.elh[0] = e;
mMatMulParam.elh[1] = l;
mMatMulParam.elh[2] = h;
mMatMulParam.elhPack[0] = UP_DIV(e, 16);
mMatMulParam.elhPack[1] = UP_DIV(l, 16);
mMatMulParam.elhPack[2] = UP_DIV(h, 16);
mMatMulParam.bStride[0] = 0;
mMatMulParam.bStride[1] = input->height() * input->width();
mMatMulParam.bStride[2] = 1;
mMatMulParam.cStride[0] = h;
mMatMulParam.cStride[1] = 0;
mMatMulParam.cStride[2] = 1;
mMatMulParam.aPStride[0] = 256 * mMatMulParam.elhPack[1];
mMatMulParam.aPStride[1] = 256;
mMatMulParam.aPStride[2] = 16;
mMatMulParam.bPStride[0] = 256 * mMatMulParam.elhPack[1];
mMatMulParam.bPStride[1] = 256;
mMatMulParam.bPStride[2] = 16;
if (convCommon->relu()) {
mMatMulParam.minValue = 0.0f;
}
if (convCommon->relu6()) {
mMatMulParam.minValue = 0.0f;
mMatMulParam.maxValue = 6.0f;
}
runtime->memcpy((uint8_t*)mGpuMatMulParam.first + mGpuMatMulParam.second, &mMatMulParam, sizeof(MatMulParam), MNNMemcpyHostToDevice);
// Alloc temp cuda memory
auto pool = static_cast<CUDABackend*>(backend())->getBufferPool();
auto buffer1 = pool->alloc(sizeof(float) * mMatMulParam.elhPack[0] * mMatMulParam.elhPack[2]* MATMULPACK * MATMULPACK);
auto buffer2 = pool->alloc(sizeof(__half) * mMatMulParam.elhPack[1] * mMatMulParam.elhPack[2] * MATMULPACK * MATMULPACK);
mIm2ColBuffer = (float*)((uint8_t*)buffer1.first + buffer1.second);
mInputBuffer = (__half*)((uint8_t*)buffer2.first + buffer2.second);
pool->free(buffer2);
pool->free(buffer1);
return NO_ERROR;
}
ErrorCode DeconvSingleInputExecution::onExecute(const std::vector<Tensor*> &inputs, const std::vector<Tensor*> &outputs) {
//MNN_PRINT("cuda convSingleInput onExecute in, inputsize:%d %d\n", (int)inputs.size(), workspace_size_);
MNN_ASSERT(inputs.size() == 1);
MNN_ASSERT(outputs.size() == 1);
auto bytes = static_cast<CUDABackend*>(backend())->getBytes(inputs[0]);
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
const void *input_addr = (const void*)inputs[0]->deviceId();
const void *filter_addr = mResource->mFilter;
const void *bias_addr = mResource->mBias;
void *output_addr = (void*)outputs[0]->deviceId();
auto gpuInpReorder = (const InputReorderParameter*)((uint8_t*)mGpuInpReorderParam.first + mGpuInpReorderParam.second);
auto gpuCol2Im = (const Col2ImParameter*)((uint8_t*)mGpuCol2ImParam.first + mGpuCol2ImParam.second);
auto gpuMatMul = (const MatMulParam*)((uint8_t*)mGpuMatMulParam.first + mGpuMatMulParam.second);
const int rerangeCount = mInpReorderParameter.lpack_size * mInpReorderParameter.hpack_size * 16 * 16;
int inp_block_num = runtime->blocks_num(rerangeCount);
int inp_thread_num = runtime->threads_num();
// Do input Rerange
//runtime->memset(mInputBuffer, 0, mMatMulParam.elhPack[2] * mMatMulParam.elhPack[1] * MATMULPACK * MATMULPACK * sizeof(__half));
hipLaunchKernelGGL(( DeconvInputRerange), dim3(inp_block_num), dim3(inp_thread_num), 0, 0, rerangeCount, gpuInpReorder, (const float*)input_addr, mInputBuffer);
// Do Gemm operation
GemmPackedMain(runtime, &mMatMulParam, gpuMatMul, (float*)mIm2ColBuffer, (const half*)filter_addr, (const half*)mInputBuffer, nullptr, bytes, false, false);
// Do Col2Im trans
int height_col = mCol2ImParamter.ih;
int width_col = mCol2ImParamter.iw;
int num_kernels = mCol2ImParamter.ob * UP_DIV(mCol2ImParamter.oc, 16) * mCol2ImParamter.oh * mCol2ImParamter.ow * 16;
int col2im_block_num = runtime->blocks_num(num_kernels);
int col2im_thread_num = runtime->threads_num();
// printf("col2im:%d, %d-%d-%d-%d-%d-%d\n %d-%d-%d-%d-%d-%d\n %d-%d\n", mCol2ImParamter.ob, mCol2ImParamter.oh, mCol2ImParamter.ow, mCol2ImParamter.oc, \
// mCol2ImParamter.ih, mCol2ImParamter.iw, mCol2ImParamter.ic, \
// mCol2ImParamter.padX, mCol2ImParamter.padY, mCol2ImParamter.kernelX, mCol2ImParamter.kernelY, mCol2ImParamter.strideX, mCol2ImParamter.strideY, \
// col2im_block_num, col2im_thread_num);
hipLaunchKernelGGL(( Col2Im<float>), dim3(col2im_block_num), dim3(col2im_thread_num), 0, 0,
num_kernels, (const float*)mIm2ColBuffer, mCol2ImParamter.ob, mCol2ImParamter.oh, mCol2ImParamter.ow, mCol2ImParamter.oc,
mCol2ImParamter.kernelY, mCol2ImParamter.kernelX, mCol2ImParamter.padY, mCol2ImParamter.padX,
mCol2ImParamter.strideY, mCol2ImParamter.strideX, mCol2ImParamter.dilateY, mCol2ImParamter.dilateX,
height_col, width_col, (const float*)bias_addr, (float *)output_addr);
return NO_ERROR;
}
class CUDADeconvolutionCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
if (nullptr != op->main_as_Convolution2D()->quanParameter()) {
auto quan = op->main_as_Convolution2D()->quanParameter();
if (1 == quan->type() || 2 == quan->type()) {
MNN_PRINT("cuda Deconv quant type 1 or 2 not support\n");
return nullptr;
}
}
if(inputs.size() == 3) {
MNN_PRINT("Deconv inputs size:3 not support\n");
return nullptr;
} else if(inputs.size() == 1) {
std::shared_ptr<DeconvSingleInputExecution::Resource> resource(new DeconvSingleInputExecution::Resource(backend, op));
return new DeconvSingleInputExecution(backend, op, resource);
} else {
MNN_PRINT("Deconv inputs size:%d not support", (int)inputs.size());
return nullptr;
}
}
};
CUDACreatorRegister<CUDADeconvolutionCreator> __DeConvExecution(OpType_Deconvolution);
}// namespace CUDA
}// namespace MNN
| 66f6ff27f210dc2d9ed225b7305846f0ec61f648.cu | //
// DeconvSingleInputExecution.cpp
// MNN
//
// Created by MNN on 2022/03/04.
// Copyright © 2018, Alibaba Group Holding Limited
//
#include "DeconvSingleInputExecution.hpp"
#include "MNNCUDADefine.hpp"
#include "MNNCUDAFunction.cuh"
namespace MNN {
namespace CUDA {
__global__ void DeconvKernelReorder(const float* B, half* BP, int kw, int kh, int ic, int oc, int icPack) {
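    // Repacks the deconvolution weights from their [ic][oc][kh*kw] float layout
    // into half-precision 16x16 (MATMULPACK) tiles expected by the packed GEMM;
    // positions past the real extents e = oc*kh*kw or l = ic are zero-filled.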
int kernelCount = kw * kh;
int e = oc * kernelCount;
int l = ic;
int eDiv = UP_DIV(e, MATMULPACK);
int eAlign = eDiv * MATMULPACK;
int lDiv = UP_DIV(l, icPack);
int lAlign = lDiv * icPack;
int maxCount = eAlign * lAlign;
for (size_t indexO = blockIdx.x * blockDim.x + threadIdx.x; indexO < maxCount; indexO += blockDim.x * gridDim.x) {
int lR = indexO % icPack;
int tmp = indexO / icPack;
int eR = tmp % MATMULPACK;
int tmp2 = tmp / MATMULPACK;
int lC = tmp2 % lDiv;
int eC = tmp2 / lDiv;
half* dst = BP + indexO;
int sL = lC * icPack + lR;//ic_idx
int sE = eC * MATMULPACK + eR;
if (sL >= ic) {
*dst = 0.0;
continue;
}
int oEC = sE / (kernelCount);//oc_idx
int oEk = sE % kernelCount;//khw_idx
if (sE >= e) {
*dst = 0.0;
continue;
}
const float* src = B + sL * kernelCount * oc + oEk + oEC * kernelCount;
*dst = *src;
}
}
__global__ void DeconvInputRerange(const int count,
const InputReorderParameter* param,
const float* Inp,
__half* InpRe
) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
int l = 16 * param->lpack_size;
int h = 16 * param->hpack_size;
int lIndex = i % l;
int hIndex = i / l;
int lU = lIndex / 16;
int lR = lIndex % 16;
int hU = hIndex / 16;
int hR = hIndex % 16;
__half* dst = InpRe + hU * param->lpack_size * 16 * 16 + lU * 16 * 16 + lR + hR * 16;
if(hIndex >= param->h_size) {
dst[0] = (__half)0.0;
break;
}
float value = Inp[(lU*param->h_size + hIndex) * 16 + lR];
dst[0] = value;
}
}
template <typename Dtype>
__global__ void Col2Im(const int n, const Dtype* data_col,
const int batch, const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
const Dtype* bias, Dtype* data_im
) {
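  // Inverse of im2col: each thread produces one element of the 16-channel-packed
  // output image (layout C/16, N, H, W, 16) by summing every im2col column that
  // covers this pixel and adding the per-channel bias when one is given.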
const int channel_pack = ((channels+15) / 16);
for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < (n); index += blockDim.x * gridDim.x) {
Dtype val = 0;
const int c_p = index / (batch * width * height * 16);
const int idx_tmp = index % (batch * width * height * 16);
const int b_im = idx_tmp / (width * height * 16);
const int hw_16 = idx_tmp % (width * height * 16);
const int c_l = hw_16 % 16;
const int c_im = c_p * 16 + c_l;
const int hw = hw_16 / 16;
const int w_im = hw % width + pad_w;
const int h_im = hw / width + pad_h;
if(c_im >= channels) {
data_im[index] = val;
break;
}
if(nullptr != bias) {
val += bias[c_im];
}
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int data_col_index = ((((c_im * kernel_h + h_k) * kernel_w + w_k) * batch + b_im) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
DeconvSingleInputExecution::Resource::Resource(Backend* bn, const MNN::Op* op) {
mBackend = bn;
auto runtime = static_cast<CUDABackend*>(bn)->getCUDARuntime();
auto conv = op->main_as_Convolution2D();
auto common = conv->common();
mKernelInfo.kernelX = common->kernelX();
mKernelInfo.kernelY = common->kernelY();
mKernelInfo.groups = common->group();
mKernelInfo.strideX = common->strideX();
mKernelInfo.strideY = common->strideY();
mKernelInfo.dilateX = common->dilateX();
mKernelInfo.dilateY = common->dilateY();
mKernelInfo.activationType = common->relu() ? 1 : (common->relu6() ? 2 : 0);
//weight host->device
const float* filterDataPtr = nullptr;
int weightSize = 0;
std::shared_ptr<ConvolutionCommon::Int8Common> quanCommon;
ConvolutionCommon::getConvParameters(&quanCommon, conv, &filterDataPtr, &weightSize);
mKernelInfo.kernelN = common->outputCount();
mKernelInfo.kernelC = weightSize / mKernelInfo.kernelN / mKernelInfo.kernelX / mKernelInfo.kernelY;
MatMulParam param;
int e = mKernelInfo.kernelN * mKernelInfo.kernelX * mKernelInfo.kernelY;
int l = mKernelInfo.kernelC;
int h = 0;
param.elh[0] = e;
param.elh[1] = l;
param.elh[2] = h;
param.elhPack[0] = UP_DIV(e, 16);
param.elhPack[1] = UP_DIV(l, 16);
param.elhPack[2] = UP_DIV(h, 16);
param.aStride[0] = 1;
param.aStride[1] = e;
param.aStride[2] = 0;
param.bStride[0] = 0;
param.bStride[1] = h;
param.bStride[2] = 1;
auto gpuParam = static_cast<CUDABackend*>(bn)->getStaticBufferPool()->alloc(sizeof(MatMulParam));
auto tempCacheBuffer = static_cast<CUDABackend*>(bn)->getStaticBufferPool()->alloc(weightSize * sizeof(float));
float* cacheWeight = (float*)((uint8_t*)tempCacheBuffer.first + tempCacheBuffer.second);
runtime->memcpy(cacheWeight, filterDataPtr, weightSize * sizeof(float), MNNMemcpyHostToDevice);
runtime->memcpy((uint8_t*)gpuParam.first + gpuParam.second, ¶m, sizeof(MatMulParam), MNNMemcpyHostToDevice);
// Reorder weight
weightTensor.reset(Tensor::createDevice<int16_t>({param.elhPack[0] * param.elhPack[1] * (MATMULPACK * MATMULPACK)}));
bn->onAcquireBuffer(weightTensor.get(), Backend::STATIC);
mFilter = (void *)weightTensor.get()->buffer().device;
auto& prop = runtime->prop();
int cores = prop.multiProcessorCount;
int threadNumbers = prop.maxThreadsPerBlock;
DeconvKernelReorder<<<cores, threadNumbers>>>((float*)cacheWeight, (half*)mFilter,
mKernelInfo.kernelX, mKernelInfo.kernelY, mKernelInfo.kernelC, mKernelInfo.kernelN, MATMULPACK);
static_cast<CUDABackend*>(bn)->getStaticBufferPool()->free(tempCacheBuffer);
static_cast<CUDABackend*>(bn)->getStaticBufferPool()->free(gpuParam);
// Copy Bias
int biasSize = conv->bias()->size();
biasTensor.reset(Tensor::createDevice<float>({biasSize}));
bn->onAcquireBuffer(biasTensor.get(), Backend::STATIC);
mBias = (void *)biasTensor.get()->buffer().device;
cuda_check(cudaMemcpy(mBias, conv->bias()->data(), conv->bias()->size()*sizeof(float), cudaMemcpyHostToDevice));
}
DeconvSingleInputExecution::Resource::~Resource() {
// Do nothing
}
DeconvSingleInputExecution::DeconvSingleInputExecution(Backend* backend, const MNN::Op* op, std::shared_ptr<Resource> res) : Execution(backend), mOp(op) {
mResource = res;
auto runtime = static_cast<CUDABackend*>(backend)->getCUDARuntime();
auto staticPool = static_cast<CUDABackend*>(backend)->getStaticBufferPool();
mGpuMatMulParam = staticPool->alloc(sizeof(MatMulParam));
mGpuCol2ImParam = staticPool->alloc(sizeof(Col2ImParameter));
mGpuInpReorderParam = staticPool->alloc(sizeof(InputReorderParameter));
}
DeconvSingleInputExecution::~DeconvSingleInputExecution() {
auto staticPool = static_cast<CUDABackend*>(backend())->getStaticBufferPool();
staticPool->free(mGpuMatMulParam);
staticPool->free(mGpuCol2ImParam);
staticPool->free(mGpuInpReorderParam);
}
bool DeconvSingleInputExecution::onClone(Backend* bn, const Op* op, Execution** dst) {
if (!mValid) {
return false;
}
if (nullptr == dst) {
return true;
}
auto dstExe = new DeconvSingleInputExecution(bn, op, mResource);
*dst = dstExe;
return true;
}
ErrorCode DeconvSingleInputExecution::onResize(const std::vector<Tensor*> &inputs, const std::vector<Tensor*> &outputs) {
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
auto input = inputs[0], output = outputs[0];
const int UNIT = 1;
auto convCommon = mOp->main_as_Convolution2D()->common();
// Input Rerange Param
mInpReorderParameter.hw_size = input->height() * input->width();
mInpReorderParameter.ic_stride = mInpReorderParameter.hw_size;
mInpReorderParameter.ib_stride = mInpReorderParameter.hw_size * input->channel();
mInpReorderParameter.oc_stride = mInpReorderParameter.ib_stride;
mInpReorderParameter.ob_stride = mInpReorderParameter.hw_size;
mInpReorderParameter.l_size = input->channel();
mInpReorderParameter.h_size = input->batch() * mInpReorderParameter.hw_size;
mInpReorderParameter.lpack_size = UP_DIV(mInpReorderParameter.l_size, 16);
mInpReorderParameter.hpack_size = UP_DIV(mInpReorderParameter.h_size, 16);
runtime->memcpy((uint8_t*)mGpuInpReorderParam.first + mGpuInpReorderParam.second, &mInpReorderParameter, sizeof(InputReorderParameter), MNNMemcpyHostToDevice);
// Col2Im Param
auto pad = ConvolutionCommon::convolutionTransposePad(input, output, mOp->main_as_Convolution2D()->common());
mCol2ImParamter.dilateX = convCommon->dilateX();
mCol2ImParamter.dilateY = convCommon->dilateY();
mCol2ImParamter.strideX = convCommon->strideX();
mCol2ImParamter.strideY = convCommon->strideY();
mCol2ImParamter.ic = input->channel();
mCol2ImParamter.oc = output->channel();
mCol2ImParamter.kernelX = convCommon->kernelX();
mCol2ImParamter.kernelY = convCommon->kernelY();
mCol2ImParamter.padX = pad.first;
mCol2ImParamter.padY = pad.second;
mCol2ImParamter.ih = input->height();
mCol2ImParamter.iw = input->width();
mCol2ImParamter.oh = output->height();
mCol2ImParamter.ow = output->width();
mCol2ImParamter.ob = output->batch();
runtime->memcpy((uint8_t*)mGpuCol2ImParam.first + mGpuCol2ImParam.second, &mCol2ImParamter, sizeof(Col2ImParameter), MNNMemcpyHostToDevice);
// Matmul Param
int e = output->channel() * mCol2ImParamter.kernelX * mCol2ImParamter.kernelY;
int l = input->channel();
int h = input->height() * input->width() * output->batch();
mMatMulParam.elh[0] = e;
mMatMulParam.elh[1] = l;
mMatMulParam.elh[2] = h;
mMatMulParam.elhPack[0] = UP_DIV(e, 16);
mMatMulParam.elhPack[1] = UP_DIV(l, 16);
mMatMulParam.elhPack[2] = UP_DIV(h, 16);
mMatMulParam.bStride[0] = 0;
mMatMulParam.bStride[1] = input->height() * input->width();
mMatMulParam.bStride[2] = 1;
mMatMulParam.cStride[0] = h;
mMatMulParam.cStride[1] = 0;
mMatMulParam.cStride[2] = 1;
mMatMulParam.aPStride[0] = 256 * mMatMulParam.elhPack[1];
mMatMulParam.aPStride[1] = 256;
mMatMulParam.aPStride[2] = 16;
mMatMulParam.bPStride[0] = 256 * mMatMulParam.elhPack[1];
mMatMulParam.bPStride[1] = 256;
mMatMulParam.bPStride[2] = 16;
if (convCommon->relu()) {
mMatMulParam.minValue = 0.0f;
}
if (convCommon->relu6()) {
mMatMulParam.minValue = 0.0f;
mMatMulParam.maxValue = 6.0f;
}
runtime->memcpy((uint8_t*)mGpuMatMulParam.first + mGpuMatMulParam.second, &mMatMulParam, sizeof(MatMulParam), MNNMemcpyHostToDevice);
// Alloc temp cuda memory
auto pool = static_cast<CUDABackend*>(backend())->getBufferPool();
auto buffer1 = pool->alloc(sizeof(float) * mMatMulParam.elhPack[0] * mMatMulParam.elhPack[2]* MATMULPACK * MATMULPACK);
auto buffer2 = pool->alloc(sizeof(__half) * mMatMulParam.elhPack[1] * mMatMulParam.elhPack[2] * MATMULPACK * MATMULPACK);
mIm2ColBuffer = (float*)((uint8_t*)buffer1.first + buffer1.second);
mInputBuffer = (__half*)((uint8_t*)buffer2.first + buffer2.second);
pool->free(buffer2);
pool->free(buffer1);
return NO_ERROR;
}
ErrorCode DeconvSingleInputExecution::onExecute(const std::vector<Tensor*> &inputs, const std::vector<Tensor*> &outputs) {
//MNN_PRINT("cuda convSingleInput onExecute in, inputsize:%d %d\n", (int)inputs.size(), workspace_size_);
MNN_ASSERT(inputs.size() == 1);
MNN_ASSERT(outputs.size() == 1);
auto bytes = static_cast<CUDABackend*>(backend())->getBytes(inputs[0]);
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
const void *input_addr = (const void*)inputs[0]->deviceId();
const void *filter_addr = mResource->mFilter;
const void *bias_addr = mResource->mBias;
void *output_addr = (void*)outputs[0]->deviceId();
auto gpuInpReorder = (const InputReorderParameter*)((uint8_t*)mGpuInpReorderParam.first + mGpuInpReorderParam.second);
auto gpuCol2Im = (const Col2ImParameter*)((uint8_t*)mGpuCol2ImParam.first + mGpuCol2ImParam.second);
auto gpuMatMul = (const MatMulParam*)((uint8_t*)mGpuMatMulParam.first + mGpuMatMulParam.second);
const int rerangeCount = mInpReorderParameter.lpack_size * mInpReorderParameter.hpack_size * 16 * 16;
int inp_block_num = runtime->blocks_num(rerangeCount);
int inp_thread_num = runtime->threads_num();
// Do input Rerange
//runtime->memset(mInputBuffer, 0, mMatMulParam.elhPack[2] * mMatMulParam.elhPack[1] * MATMULPACK * MATMULPACK * sizeof(__half));
DeconvInputRerange<<<inp_block_num, inp_thread_num>>>(rerangeCount, gpuInpReorder, (const float*)input_addr, mInputBuffer);
// Do Gemm operation
GemmPackedMain(runtime, &mMatMulParam, gpuMatMul, (float*)mIm2ColBuffer, (const half*)filter_addr, (const half*)mInputBuffer, nullptr, bytes, false, false);
// Do Col2Im trans
int height_col = mCol2ImParamter.ih;
int width_col = mCol2ImParamter.iw;
int num_kernels = mCol2ImParamter.ob * UP_DIV(mCol2ImParamter.oc, 16) * mCol2ImParamter.oh * mCol2ImParamter.ow * 16;
int col2im_block_num = runtime->blocks_num(num_kernels);
int col2im_thread_num = runtime->threads_num();
// printf("col2im:%d, %d-%d-%d-%d-%d-%d\n %d-%d-%d-%d-%d-%d\n %d-%d\n", mCol2ImParamter.ob, mCol2ImParamter.oh, mCol2ImParamter.ow, mCol2ImParamter.oc, \
// mCol2ImParamter.ih, mCol2ImParamter.iw, mCol2ImParamter.ic, \
// mCol2ImParamter.padX, mCol2ImParamter.padY, mCol2ImParamter.kernelX, mCol2ImParamter.kernelY, mCol2ImParamter.strideX, mCol2ImParamter.strideY, \
// col2im_block_num, col2im_thread_num);
Col2Im<float><<<col2im_block_num, col2im_thread_num>>>(
num_kernels, (const float*)mIm2ColBuffer, mCol2ImParamter.ob, mCol2ImParamter.oh, mCol2ImParamter.ow, mCol2ImParamter.oc,
mCol2ImParamter.kernelY, mCol2ImParamter.kernelX, mCol2ImParamter.padY, mCol2ImParamter.padX,
mCol2ImParamter.strideY, mCol2ImParamter.strideX, mCol2ImParamter.dilateY, mCol2ImParamter.dilateX,
height_col, width_col, (const float*)bias_addr, (float *)output_addr);
return NO_ERROR;
}
class CUDADeconvolutionCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
if (nullptr != op->main_as_Convolution2D()->quanParameter()) {
auto quan = op->main_as_Convolution2D()->quanParameter();
if (1 == quan->type() || 2 == quan->type()) {
MNN_PRINT("cuda Deconv quant type 1 or 2 not support\n");
return nullptr;
}
}
if(inputs.size() == 3) {
MNN_PRINT("Deconv inputs size:3 not support\n");
return nullptr;
} else if(inputs.size() == 1) {
std::shared_ptr<DeconvSingleInputExecution::Resource> resource(new DeconvSingleInputExecution::Resource(backend, op));
return new DeconvSingleInputExecution(backend, op, resource);
} else {
MNN_PRINT("Deconv inputs size:%d not support", (int)inputs.size());
return nullptr;
}
}
};
CUDACreatorRegister<CUDADeconvolutionCreator> __DeConvExecution(OpType_Deconvolution);
}// namespace CUDA
}// namespace MNN
|
3e77ca5a342d5ade9a6740f4806483a598c1a390.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "profiler.h"
#include <vector>
int main()
{
size_t n = 128*1024;
size_t n_bytes = n*sizeof(double);
double *a_dev = nullptr, *buffer = nullptr;
std::vector<double> a_host(n,0.);
profiler prof;
hipHostMalloc( (void **) &buffer, n_bytes);
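    // 'buffer' is pinned (page-locked) host memory: the GPU can DMA to and from it
    // directly, which is why the "Pinned memory" copies below are faster than the
    // pageable std::vector used for the "Paged memory" case.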
for(auto i = 0; i < n; ++i)
{
a_host[i] = i;
buffer[i] = i;
}
hipError_t cuerr = hipMalloc( (void**)&a_dev, n_bytes);
if (cuerr != hipSuccess)
{
std::cout << "Cannot allocate GPU memory" << hipGetErrorString(cuerr);
return 1;
}
prof.tic("Pinned memory");
hipDeviceSynchronize();
for(auto i = 0; i < 1000; ++i)
{
cuerr = hipMemcpy( a_dev, buffer, n_bytes, hipMemcpyHostToDevice );
if (cuerr != hipSuccess)
{
std::cout << "Cannot copy data to device" << hipGetErrorString(cuerr);
return 1;
}
cuerr = hipMemcpy ( buffer, a_dev, n_bytes, hipMemcpyDeviceToHost );
if (cuerr != hipSuccess)
{
std::cout << "Cannot copy data from device" << hipGetErrorString(cuerr);
return 1;
}
}
hipDeviceSynchronize();
prof.toc("Pinned memory");
prof.tic("Paged memory");
hipDeviceSynchronize();
for(auto i = 0; i < 1000; ++i)
{
cuerr = hipMemcpy( a_dev, a_host.data(), n_bytes, hipMemcpyHostToDevice );
if (cuerr != hipSuccess)
{
std::cout << "Cannot copy data to device" << hipGetErrorString(cuerr);
return 1;
}
cuerr = hipMemcpy ( a_host.data(), a_dev, n_bytes, hipMemcpyDeviceToHost );
if (cuerr != hipSuccess)
{
std::cout << "Cannot copy data from device" << hipGetErrorString(cuerr);
return 1;
}
}
hipDeviceSynchronize();
prof.toc("Paged memory");
hipFree(a_dev);
hipHostFree(buffer);
prof.report();
return 0;
}
| 3e77ca5a342d5ade9a6740f4806483a598c1a390.cu | #include <iostream>
#include "profiler.h"
#include <vector>
int main()
{
size_t n = 128*1024;
size_t n_bytes = n*sizeof(double);
double *a_dev = nullptr, *buffer = nullptr;
std::vector<double> a_host(n,0.);
profiler prof;
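    // buffer is page-locked (pinned) host memory: the GPU can DMA from it directly, so the
    // transfers timed below should run faster than the pageable std::vector copies measured later.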
cudaMallocHost( (void **) &buffer, n_bytes);
for(auto i = 0; i < n; ++i)
{
a_host[i] = i;
buffer[i] = i;
}
cudaError_t cuerr = cudaMalloc( (void**)&a_dev, n_bytes);
if (cuerr != cudaSuccess)
{
std::cout << "Cannot allocate GPU memory" << cudaGetErrorString(cuerr);
return 1;
}
prof.tic("Pinned memory");
cudaThreadSynchronize();
for(auto i = 0; i < 1000; ++i)
{
cuerr = cudaMemcpy( a_dev, buffer, n_bytes, cudaMemcpyHostToDevice );
if (cuerr != cudaSuccess)
{
std::cout << "Cannot copy data to device" << cudaGetErrorString(cuerr);
return 1;
}
cuerr = cudaMemcpy ( buffer, a_dev, n_bytes, cudaMemcpyDeviceToHost );
if (cuerr != cudaSuccess)
{
std::cout << "Cannot copy data from device" << cudaGetErrorString(cuerr);
return 1;
}
}
cudaThreadSynchronize();
prof.toc("Pinned memory");
prof.tic("Paged memory");
cudaThreadSynchronize();
for(auto i = 0; i < 1000; ++i)
{
cuerr = cudaMemcpy( a_dev, a_host.data(), n_bytes, cudaMemcpyHostToDevice );
if (cuerr != cudaSuccess)
{
std::cout << "Cannot copy data to device" << cudaGetErrorString(cuerr);
return 1;
}
cuerr = cudaMemcpy ( a_host.data(), a_dev, n_bytes, cudaMemcpyDeviceToHost );
if (cuerr != cudaSuccess)
{
std::cout << "Cannot copy data from device" << cudaGetErrorString(cuerr);
return 1;
}
}
cudaThreadSynchronize();
prof.toc("Paged memory");
cudaFree(a_dev);
cudaFreeHost(buffer);
prof.report();
return 0;
}
|
fa021a1b66e937c9f8039daacae341cde4911751.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include "internal.hpp"
#include "pcl/gpu/utils/safe_call.hpp"
#include "utils/vector_operations.hpp"
#include "pcl/gpu/utils/device/funcattrib.hpp"
#include "pcl/gpu/utils/device/warp.hpp"
#include "pcl/gpu/features/device/rodrigues.hpp"
#include "pcl/gpu/features/device/pair_features.hpp"
namespace pcl
{
namespace device
{
struct PpfImpl
{
enum
{
CTA_SIZE = 256
};
PtrSz<PointType> points;
const NormalType *normals;
PtrSz<int> indices;
mutable PPFSignature* output;
__device__ __forceinline__ void operator()() const
{
int total = points.size * indices.size;
int idx = blockIdx.x * CTA_SIZE + threadIdx.x;
        if (idx >= total)  // idx == total would index past the output buffer
return;
int index_i = idx / points.size; // indices
int j = idx % points.size; // points
int i = indices.data[index_i];
PPFSignature out;
if (i != j)
{
float3 pi = fetch(points.data, i);
float3 ni = fetch(normals, i);
float3 pj = fetch(points.data, j);
float3 nj = fetch(normals, j);
//if (computePPFPairFeature(pi, ni, pj, nj, out.f1, out.f2, out.f3, out.f4))
if (computePairFeatures(pi, ni, pj, nj, out.f1, out.f2, out.f3, out.f4))
computeAlfaM(pi, ni, pj, out.alpha_m);
else
out.f1 = out.f2 = out.f3 = out.f4 = out.alpha_m = 0.f;
}
else
out.f1 = out.f2 = out.f3 = out.f4 = out.alpha_m = 0.f;
output[idx] = out;
}
template<class T> __forceinline__ __device__ float3 fetch(const T* data, int index) const
{
//return *(float3*)&data[index];
T t = data[index];
return make_float3(t.x, t.y, t.z);
}
};
__global__ void estimatePpfKernel(const PpfImpl ppf) { ppf(); }
struct PpfRgbImpl
{
enum
{
CTA_SIZE = 256
};
PtrSz<PointXYZRGB> points;
const NormalType *normals;
PtrSz<int> indices;
mutable PPFRGBSignature* output;
__device__ __forceinline__ void operator()() const
{
int total = points.size * indices.size;
int idx = blockIdx.x * CTA_SIZE + threadIdx.x;
        if (idx >= total)  // idx == total would index past the output buffer
return;
int index_i = idx / points.size; // indices
int j = idx % points.size; // points
int i = indices.data[index_i];
PPFRGBSignature out;
if (i != j)
{
int ci;
float3 pi = fetchXYZRGB(points.data, i, ci);
float3 ni = fetch(normals, i);
int cj;
float3 pj = fetchXYZRGB(points.data, j, cj);
float3 nj = fetch(normals, j);
if (computeRGBPairFeatures(pi, ni, ci, pj, nj, cj, out.f1, out.f2, out.f3, out.f4, out.r_ratio, out.g_ratio, out.b_ratio))
//if (computePairFeatures(pi, ni, pj, nj, out.f1, out.f2, out.f3, out.f4))
{
computeAlfaM(pi, ni, pj, out.alpha_m);
//computeRGBPairFeatures_RGBOnly(ci, cj, out.r_ratio, out.g_ratio, out.b_ratio);
}
else
out.f1 = out.f2 = out.f3 = out.f4 = out.r_ratio = out.g_ratio = out.b_ratio = out.alpha_m = 0.f;
}
else
out.f1 = out.f2 = out.f3 = out.f4 = out.r_ratio = out.g_ratio = out.b_ratio = out.alpha_m = 0.f;
output[idx] = out;
}
template<class T> __forceinline__ __device__ float3 fetch(const T* data, int index) const
{
//return *(float3*)&data[index];
T t = data[index];
return make_float3(t.x, t.y, t.z);
}
__forceinline__ __device__ float3 fetchXYZRGB(const PointXYZRGB* data, int index, int& color) const
{
float4 xyzrgb = data[index];
color = __float_as_int(xyzrgb.w);
return make_float3(xyzrgb.x, xyzrgb.y, xyzrgb.z);
}
};
__global__ void estimatePpfRgbKernel(const PpfRgbImpl ppfrgb) { ppfrgb(); }
}
}
void pcl::device::computePPF(const PointCloud& input, const Normals& normals, const Indices& indices, DeviceArray<PPFSignature>& output)
{
int total = (int)input.size() * (int)indices.size();
output.create(total);
PpfImpl ppf;
ppf.points = input;
ppf.normals = normals;
ppf.indices = indices;
ppf.output = output;
int block = PpfImpl::CTA_SIZE;
int grid = divUp(total, block);
hipLaunchKernelGGL(( estimatePpfKernel), dim3(grid), dim3(block), 0, 0, ppf);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
//printFuncAttrib(estimatePpfKernel);
}
void pcl::device::computePPFRGB(const PointXYZRGBCloud& input, const Normals& normals, const Indices& indices, DeviceArray<PPFRGBSignature>& output)
{
int total = (int)input.size() * (int)indices.size();
output.create(total);
PpfRgbImpl ppfrgb;
ppfrgb.points = input;
ppfrgb.normals = normals;
ppfrgb.indices = indices;
ppfrgb.output = output;
int block = PpfRgbImpl::CTA_SIZE;
int grid = divUp(total, block);
hipLaunchKernelGGL(( estimatePpfRgbKernel), dim3(grid), dim3(block), 0, 0, ppfrgb);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
//printFuncAttrib(estimatePpfRgbKernel);
}
namespace pcl
{
namespace device
{
struct PpfRgbRegionImpl
{
enum
{
CTA_SIZE = 256,
WARPS = CTA_SIZE / Warp::WARP_SIZE,
FSize = sizeof(PPFRGBSignature)/sizeof(float),
FSizeWithoutAlfaM = FSize - 1
};
struct plus
{
__forceinline__ __device__ float operator()(const float &lhs, const volatile float& rhs) const { return lhs + rhs; }
};
const PointXYZRGB* points;
const NormalType* normals;
PtrSz<int> indices;
PtrStep<int> gindices;
const int *sizes;
mutable PPFRGBSignature* output;
__device__ __forceinline__ void operator()() const
{
int tid = threadIdx.x;
int warpid = Warp::id();
int index_i = blockIdx.x * WARPS + warpid;
if (index_i >= indices.size)
return;
int i = indices[index_i];
int size = sizes[index_i];
const int* ginds = gindices.ptr(index_i);
int lane = Warp::laneId();
__shared__ float3 points_buf[WARPS];
__shared__ float3 normasl_buf[WARPS];
__shared__ int colors_buf[WARPS];
if (lane == 0)
{
points_buf[warpid] = fetchXYZRGB(points, i, colors_buf[warpid]);
normasl_buf[warpid] = fetch(normals, i);
}
__shared__ float cta_buf[7][CTA_SIZE + 1];
cta_buf[0][tid] = cta_buf[1][tid] = cta_buf[2][tid] = cta_buf[3][tid] = 0.f;
cta_buf[4][tid] = cta_buf[5][tid] = cta_buf[6][tid] = 0.f;
for(int c = lane; c < size; c+= Warp::STRIDE)
{
int j = ginds[c];
if (i != j)
{
int cj;
float3 pj = fetchXYZRGB(points, j, cj);
float3 nj = fetch(normals, j);
float f1, f2, f3, f4, r_ratio, g_ratio, b_ratio;
if (computeRGBPairFeatures(points_buf[warpid], normasl_buf[warpid], colors_buf[warpid], pj, nj, cj, f1, f2, f3, f4, r_ratio, g_ratio, b_ratio))
//computeRGBPairFeatures(points_buf[warpid], normasl_buf[warpid], colors_buf[warpid], pj, nj, cj, f1, f2, f3, f4, r_ratio, g_ratio, b_ratio);
{
cta_buf[0][tid] += f1;
cta_buf[1][tid] += f2;
cta_buf[2][tid] += f3;
cta_buf[3][tid] += f4;
cta_buf[4][tid] += r_ratio;
cta_buf[5][tid] += g_ratio;
cta_buf[6][tid] += b_ratio;
}
}
}
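        // Reduce the per-lane partial sums across the warp; lanes 0..6 then write the averaged
        // feature fields of the output signature, and lane 7 writes 0 into the alpha_m slot.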
Warp::reduce(&cta_buf[0][tid - lane], plus());
Warp::reduce(&cta_buf[1][tid - lane], plus());
Warp::reduce(&cta_buf[2][tid - lane], plus());
Warp::reduce(&cta_buf[3][tid - lane], plus());
Warp::reduce(&cta_buf[4][tid - lane], plus());
Warp::reduce(&cta_buf[5][tid - lane], plus());
Warp::reduce(&cta_buf[6][tid - lane], plus());
float val = 0.f;
if (lane < FSizeWithoutAlfaM)
val = cta_buf[lane][tid - lane]/size;
float *ptr = (float*)&output[index_i];
if (lane < FSize)
ptr[lane] = val;
}
__forceinline__ __device__ float3 fetchXYZRGB(const PointXYZRGB* data, int index, int& color) const
{
float4 xyzrgb = data[index];
color = __float_as_int(xyzrgb.w);
return make_float3(xyzrgb.x, xyzrgb.y, xyzrgb.z);
}
template<class T> __forceinline__ __device__ float3 fetch(const T* data, int index) const
{
//return *(float3*)&data[index];
T t = data[index];
return make_float3(t.x, t.y, t.z);
}
};
__global__ void estiamtePpfRgbRegionKernel(const PpfRgbRegionImpl impl) { impl(); }
}
}
void pcl::device::computePPFRGBRegion(const PointXYZRGBCloud& cloud, const Normals& normals, const Indices& indices, const NeighborIndices& nn_indices, DeviceArray<PPFRGBSignature>& output)
{
output.create(nn_indices.sizes.size());
PpfRgbRegionImpl impl;
impl.points = cloud;
impl.normals = normals;
impl.indices = indices;
impl.gindices = nn_indices;
impl.sizes = nn_indices.sizes;
impl.output = output;
int block = PpfRgbRegionImpl::CTA_SIZE;
int grid = divUp((int)impl.indices.size, PpfRgbRegionImpl::WARPS);
hipLaunchKernelGGL(( estiamtePpfRgbRegionKernel), dim3(grid), dim3(block), 0, 0, impl);
cudaSafeCall( hipGetLastError() );
cudaSafeCall(hipDeviceSynchronize());
//printFuncAttrib(estiamtePpfRgbRegionKernel);
} | fa021a1b66e937c9f8039daacae341cde4911751.cu | /*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include "internal.hpp"
#include "pcl/gpu/utils/safe_call.hpp"
#include "utils/vector_operations.hpp"
#include "pcl/gpu/utils/device/funcattrib.hpp"
#include "pcl/gpu/utils/device/warp.hpp"
#include "pcl/gpu/features/device/rodrigues.hpp"
#include "pcl/gpu/features/device/pair_features.hpp"
namespace pcl
{
namespace device
{
struct PpfImpl
{
enum
{
CTA_SIZE = 256
};
PtrSz<PointType> points;
const NormalType *normals;
PtrSz<int> indices;
mutable PPFSignature* output;
__device__ __forceinline__ void operator()() const
{
int total = points.size * indices.size;
int idx = blockIdx.x * CTA_SIZE + threadIdx.x;
        if (idx >= total)  // idx == total would index past the output buffer
return;
int index_i = idx / points.size; // indices
int j = idx % points.size; // points
int i = indices.data[index_i];
PPFSignature out;
if (i != j)
{
float3 pi = fetch(points.data, i);
float3 ni = fetch(normals, i);
float3 pj = fetch(points.data, j);
float3 nj = fetch(normals, j);
//if (computePPFPairFeature(pi, ni, pj, nj, out.f1, out.f2, out.f3, out.f4))
if (computePairFeatures(pi, ni, pj, nj, out.f1, out.f2, out.f3, out.f4))
computeAlfaM(pi, ni, pj, out.alpha_m);
else
out.f1 = out.f2 = out.f3 = out.f4 = out.alpha_m = 0.f;
}
else
out.f1 = out.f2 = out.f3 = out.f4 = out.alpha_m = 0.f;
output[idx] = out;
}
template<class T> __forceinline__ __device__ float3 fetch(const T* data, int index) const
{
//return *(float3*)&data[index];
T t = data[index];
return make_float3(t.x, t.y, t.z);
}
};
__global__ void estimatePpfKernel(const PpfImpl ppf) { ppf(); }
struct PpfRgbImpl
{
enum
{
CTA_SIZE = 256
};
PtrSz<PointXYZRGB> points;
const NormalType *normals;
PtrSz<int> indices;
mutable PPFRGBSignature* output;
__device__ __forceinline__ void operator()() const
{
int total = points.size * indices.size;
int idx = blockIdx.x * CTA_SIZE + threadIdx.x;
        if (idx >= total)  // idx == total would index past the output buffer
return;
int index_i = idx / points.size; // indices
int j = idx % points.size; // points
int i = indices.data[index_i];
PPFRGBSignature out;
if (i != j)
{
int ci;
float3 pi = fetchXYZRGB(points.data, i, ci);
float3 ni = fetch(normals, i);
int cj;
float3 pj = fetchXYZRGB(points.data, j, cj);
float3 nj = fetch(normals, j);
if (computeRGBPairFeatures(pi, ni, ci, pj, nj, cj, out.f1, out.f2, out.f3, out.f4, out.r_ratio, out.g_ratio, out.b_ratio))
//if (computePairFeatures(pi, ni, pj, nj, out.f1, out.f2, out.f3, out.f4))
{
computeAlfaM(pi, ni, pj, out.alpha_m);
//computeRGBPairFeatures_RGBOnly(ci, cj, out.r_ratio, out.g_ratio, out.b_ratio);
}
else
out.f1 = out.f2 = out.f3 = out.f4 = out.r_ratio = out.g_ratio = out.b_ratio = out.alpha_m = 0.f;
}
else
out.f1 = out.f2 = out.f3 = out.f4 = out.r_ratio = out.g_ratio = out.b_ratio = out.alpha_m = 0.f;
output[idx] = out;
}
template<class T> __forceinline__ __device__ float3 fetch(const T* data, int index) const
{
//return *(float3*)&data[index];
T t = data[index];
return make_float3(t.x, t.y, t.z);
}
__forceinline__ __device__ float3 fetchXYZRGB(const PointXYZRGB* data, int index, int& color) const
{
float4 xyzrgb = data[index];
color = __float_as_int(xyzrgb.w);
return make_float3(xyzrgb.x, xyzrgb.y, xyzrgb.z);
}
};
__global__ void estimatePpfRgbKernel(const PpfRgbImpl ppfrgb) { ppfrgb(); }
}
}
void pcl::device::computePPF(const PointCloud& input, const Normals& normals, const Indices& indices, DeviceArray<PPFSignature>& output)
{
int total = (int)input.size() * (int)indices.size();
output.create(total);
PpfImpl ppf;
ppf.points = input;
ppf.normals = normals;
ppf.indices = indices;
ppf.output = output;
int block = PpfImpl::CTA_SIZE;
int grid = divUp(total, block);
estimatePpfKernel<<<grid, block>>>(ppf);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
//printFuncAttrib(estimatePpfKernel);
}
void pcl::device::computePPFRGB(const PointXYZRGBCloud& input, const Normals& normals, const Indices& indices, DeviceArray<PPFRGBSignature>& output)
{
int total = (int)input.size() * (int)indices.size();
output.create(total);
PpfRgbImpl ppfrgb;
ppfrgb.points = input;
ppfrgb.normals = normals;
ppfrgb.indices = indices;
ppfrgb.output = output;
int block = PpfRgbImpl::CTA_SIZE;
int grid = divUp(total, block);
estimatePpfRgbKernel<<<grid, block>>>(ppfrgb);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
//printFuncAttrib(estimatePpfRgbKernel);
}
namespace pcl
{
namespace device
{
struct PpfRgbRegionImpl
{
enum
{
CTA_SIZE = 256,
WARPS = CTA_SIZE / Warp::WARP_SIZE,
FSize = sizeof(PPFRGBSignature)/sizeof(float),
FSizeWithoutAlfaM = FSize - 1
};
struct plus
{
__forceinline__ __device__ float operator()(const float &lhs, const volatile float& rhs) const { return lhs + rhs; }
};
const PointXYZRGB* points;
const NormalType* normals;
PtrSz<int> indices;
PtrStep<int> gindices;
const int *sizes;
mutable PPFRGBSignature* output;
__device__ __forceinline__ void operator()() const
{
int tid = threadIdx.x;
int warpid = Warp::id();
int index_i = blockIdx.x * WARPS + warpid;
if (index_i >= indices.size)
return;
int i = indices[index_i];
int size = sizes[index_i];
const int* ginds = gindices.ptr(index_i);
int lane = Warp::laneId();
__shared__ float3 points_buf[WARPS];
__shared__ float3 normasl_buf[WARPS];
__shared__ int colors_buf[WARPS];
if (lane == 0)
{
points_buf[warpid] = fetchXYZRGB(points, i, colors_buf[warpid]);
normasl_buf[warpid] = fetch(normals, i);
}
__shared__ float cta_buf[7][CTA_SIZE + 1];
cta_buf[0][tid] = cta_buf[1][tid] = cta_buf[2][tid] = cta_buf[3][tid] = 0.f;
cta_buf[4][tid] = cta_buf[5][tid] = cta_buf[6][tid] = 0.f;
for(int c = lane; c < size; c+= Warp::STRIDE)
{
int j = ginds[c];
if (i != j)
{
int cj;
float3 pj = fetchXYZRGB(points, j, cj);
float3 nj = fetch(normals, j);
float f1, f2, f3, f4, r_ratio, g_ratio, b_ratio;
if (computeRGBPairFeatures(points_buf[warpid], normasl_buf[warpid], colors_buf[warpid], pj, nj, cj, f1, f2, f3, f4, r_ratio, g_ratio, b_ratio))
//computeRGBPairFeatures(points_buf[warpid], normasl_buf[warpid], colors_buf[warpid], pj, nj, cj, f1, f2, f3, f4, r_ratio, g_ratio, b_ratio);
{
cta_buf[0][tid] += f1;
cta_buf[1][tid] += f2;
cta_buf[2][tid] += f3;
cta_buf[3][tid] += f4;
cta_buf[4][tid] += r_ratio;
cta_buf[5][tid] += g_ratio;
cta_buf[6][tid] += b_ratio;
}
}
}
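        // Reduce the per-lane partial sums across the warp; lanes 0..6 then write the averaged
        // feature fields of the output signature, and lane 7 writes 0 into the alpha_m slot.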
Warp::reduce(&cta_buf[0][tid - lane], plus());
Warp::reduce(&cta_buf[1][tid - lane], plus());
Warp::reduce(&cta_buf[2][tid - lane], plus());
Warp::reduce(&cta_buf[3][tid - lane], plus());
Warp::reduce(&cta_buf[4][tid - lane], plus());
Warp::reduce(&cta_buf[5][tid - lane], plus());
Warp::reduce(&cta_buf[6][tid - lane], plus());
float val = 0.f;
if (lane < FSizeWithoutAlfaM)
val = cta_buf[lane][tid - lane]/size;
float *ptr = (float*)&output[index_i];
if (lane < FSize)
ptr[lane] = val;
}
__forceinline__ __device__ float3 fetchXYZRGB(const PointXYZRGB* data, int index, int& color) const
{
float4 xyzrgb = data[index];
color = __float_as_int(xyzrgb.w);
return make_float3(xyzrgb.x, xyzrgb.y, xyzrgb.z);
}
template<class T> __forceinline__ __device__ float3 fetch(const T* data, int index) const
{
//return *(float3*)&data[index];
T t = data[index];
return make_float3(t.x, t.y, t.z);
}
};
__global__ void estiamtePpfRgbRegionKernel(const PpfRgbRegionImpl impl) { impl(); }
}
}
void pcl::device::computePPFRGBRegion(const PointXYZRGBCloud& cloud, const Normals& normals, const Indices& indices, const NeighborIndices& nn_indices, DeviceArray<PPFRGBSignature>& output)
{
output.create(nn_indices.sizes.size());
PpfRgbRegionImpl impl;
impl.points = cloud;
impl.normals = normals;
impl.indices = indices;
impl.gindices = nn_indices;
impl.sizes = nn_indices.sizes;
impl.output = output;
int block = PpfRgbRegionImpl::CTA_SIZE;
int grid = divUp((int)impl.indices.size, PpfRgbRegionImpl::WARPS);
estiamtePpfRgbRegionKernel<<<grid, block>>>(impl);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall(cudaDeviceSynchronize());
//printFuncAttrib(estiamtePpfRgbRegionKernel);
} |
88a7d56ca91bd3a85c6c26112cefb0d4bdb9edf8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017-2021 by Contributors
*/
#include <GPUTreeShap/gpu_treeshap.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <memory>
#include "../common/bitfield.h"
#include "../common/categorical.h"
#include "../common/common.h"
#include "../common/device_helpers.cuh"
#include "../data/device_adapter.cuh"
#include "../data/ellpack_page.cuh"
#include "../data/proxy_dmatrix.h"
#include "../gbm/gbtree_model.h"
#include "predict_fn.h"
#include "xgboost/data.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/predictor.h"
#include "xgboost/tree_model.h"
#include "xgboost/tree_updater.h"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
struct TreeView {
RegTree::CategoricalSplitMatrix cats;
common::Span<RegTree::Node const> d_tree;
XGBOOST_DEVICE
TreeView(size_t tree_begin, size_t tree_idx,
common::Span<const RegTree::Node> d_nodes,
common::Span<size_t const> d_tree_segments,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories) {
auto begin = d_tree_segments[tree_idx - tree_begin];
auto n_nodes = d_tree_segments[tree_idx - tree_begin + 1] -
d_tree_segments[tree_idx - tree_begin];
d_tree = d_nodes.subspan(begin, n_nodes);
auto tree_cat_ptrs = d_cat_node_segments.subspan(begin, n_nodes);
auto tree_split_types = d_tree_split_types.subspan(begin, n_nodes);
auto tree_categories =
d_categories.subspan(d_cat_tree_segments[tree_idx - tree_begin],
d_cat_tree_segments[tree_idx - tree_begin + 1] -
d_cat_tree_segments[tree_idx - tree_begin]);
cats.split_type = tree_split_types;
cats.categories = tree_categories;
cats.node_ptr = tree_cat_ptrs;
}
__device__ bool HasCategoricalSplit() const {
return !cats.categories.empty();
}
};
struct SparsePageView {
common::Span<const Entry> d_data;
common::Span<const bst_row_t> d_row_ptr;
bst_feature_t num_features;
SparsePageView() = default;
XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data,
common::Span<const bst_row_t> row_ptr,
bst_feature_t num_features)
: d_data{data}, d_row_ptr{row_ptr}, num_features(num_features) {}
__device__ float GetElement(size_t ridx, size_t fidx) const {
// Binary search
auto begin_ptr = d_data.begin() + d_row_ptr[ridx];
auto end_ptr = d_data.begin() + d_row_ptr[ridx + 1];
if (end_ptr - begin_ptr == this->NumCols()) {
// Bypass span check for dense data
return d_data.data()[d_row_ptr[ridx] + fidx].fvalue;
}
common::Span<const Entry>::iterator previous_middle;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
XGBOOST_DEVICE size_t NumRows() const { return d_row_ptr.size() - 1; }
XGBOOST_DEVICE size_t NumCols() const { return num_features; }
};
struct SparsePageLoader {
bool use_shared;
SparsePageView data;
float* smem;
size_t entry_start;
__device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features,
bst_row_t num_rows, size_t entry_start, float)
: use_shared(use_shared),
data(data),
entry_start(entry_start) {
extern __shared__ float _smem[];
smem = _smem;
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * data.num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = data.d_row_ptr[global_idx];
bst_uint elem_end = data.d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = data.d_data[elem_idx - entry_start];
smem[threadIdx.x * data.num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * data.num_features + fidx];
} else {
return data.GetElement(ridx, fidx);
}
}
};
struct EllpackLoader {
EllpackDeviceAccessor const& matrix;
XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool, bst_feature_t, bst_row_t,
size_t, float)
: matrix{m} {}
__device__ __forceinline__ float GetElement(size_t ridx, size_t fidx) const {
auto gidx = matrix.GetBinIndex(ridx, fidx);
if (gidx == -1) {
return nan("");
}
if (common::IsCat(matrix.feature_types, fidx)) {
return matrix.gidx_fvalue_map[gidx];
}
// The gradient index needs to be shifted by one as min values are not included in the
// cuts.
if (gidx == matrix.feature_segments[fidx]) {
return matrix.min_fvalue[fidx];
}
return matrix.gidx_fvalue_map[gidx - 1];
}
};
template <typename Batch>
struct DeviceAdapterLoader {
Batch batch;
bst_feature_t columns;
float* smem;
bool use_shared;
data::IsValidFunctor is_valid;
using BatchT = Batch;
XGBOOST_DEV_INLINE DeviceAdapterLoader(Batch const batch, bool use_shared,
bst_feature_t num_features, bst_row_t num_rows,
size_t entry_start, float missing) :
batch{batch},
columns{num_features},
use_shared{use_shared},
is_valid{missing} {
extern __shared__ float _smem[];
smem = _smem;
if (use_shared) {
uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
auto beg = global_idx * columns;
auto end = (global_idx + 1) * columns;
for (size_t i = beg; i < end; ++i) {
auto value = batch.GetElement(i).value;
if (is_valid(value)) {
smem[threadIdx.x * num_features + (i - beg)] = value;
}
}
}
}
__syncthreads();
}
XGBOOST_DEV_INLINE float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * columns + fidx];
}
auto value = batch.GetElement(ridx * columns + fidx).value;
if (is_valid(value)) {
return value;
} else {
return nan("");
}
}
};
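// Walk a single tree from the root for row ridx, following numerical or categorical split
// decisions (with missing-value handling) until a leaf is reached.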
template <bool has_missing, bool has_categorical, typename Loader>
__device__ bst_node_t GetLeafIndex(bst_row_t ridx, TreeView const &tree,
Loader *loader) {
bst_node_t nidx = 0;
RegTree::Node n = tree.d_tree[nidx];
while (!n.IsLeaf()) {
float fvalue = loader->GetElement(ridx, n.SplitIndex());
bool is_missing = common::CheckNAN(fvalue);
nidx = GetNextNode<has_missing, has_categorical>(n, nidx, fvalue,
is_missing, tree.cats);
n = tree.d_tree[nidx];
}
return nidx;
}
template <bool has_missing, typename Loader>
__device__ float GetLeafWeight(bst_row_t ridx, TreeView const &tree,
Loader *loader) {
bst_node_t nidx = -1;
if (tree.HasCategoricalSplit()) {
nidx = GetLeafIndex<has_missing, true>(ridx, tree, loader);
} else {
nidx = GetLeafIndex<has_missing, false>(ridx, tree, loader);
}
return tree.d_tree[nidx].LeafValue();
}
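// One thread per row: for each tree in [tree_begin, tree_end) the index of the leaf reached by
// that row is written into the output matrix.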
template <typename Loader, typename Data>
__global__ void
PredictLeafKernel(Data data, common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories,
size_t tree_begin, size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start, bool use_shared,
float missing) {
bst_row_t ridx = blockDim.x * blockIdx.x + threadIdx.x;
if (ridx >= num_rows) {
return;
}
Loader loader(data, use_shared, num_features, num_rows, entry_start, missing);
for (size_t tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
bst_node_t leaf = -1;
if (d_tree.HasCategoricalSplit()) {
leaf = GetLeafIndex<true, true>(ridx, d_tree, &loader);
} else {
leaf = GetLeafIndex<true, false>(ridx, d_tree, &loader);
}
d_out_predictions[ridx * (tree_end - tree_begin) + tree_idx] = leaf;
}
}
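// One thread per row: leaf weights are accumulated over [tree_begin, tree_end); with a single
// output group the sum is added once, otherwise each tree adds into its tree group's output slot.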
template <typename Loader, typename Data, bool has_missing = true>
__global__ void
PredictKernel(Data data, common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
common::Span<int const> d_tree_group,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories, size_t tree_begin,
size_t tree_end, size_t num_features, size_t num_rows,
size_t entry_start, bool use_shared, int num_group, float missing) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
Loader loader(data, use_shared, num_features, num_rows, entry_start, missing);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (size_t tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
float leaf = GetLeafWeight<has_missing>(global_idx, d_tree, &loader);
sum += leaf;
}
d_out_predictions[global_idx] += sum;
} else {
for (size_t tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] +=
GetLeafWeight<has_missing>(global_idx, d_tree, &loader);
}
}
}
class DeviceModel {
public:
// Need to lazily construct the vectors because GPU id is only known at runtime
HostDeviceVector<RTreeNodeStat> stats;
HostDeviceVector<size_t> tree_segments;
HostDeviceVector<RegTree::Node> nodes;
HostDeviceVector<int> tree_group;
HostDeviceVector<FeatureType> split_types;
// Pointer to each tree, segmenting the node array.
HostDeviceVector<uint32_t> categories_tree_segments;
// Pointer to each node, segmenting categories array.
HostDeviceVector<RegTree::Segment> categories_node_segments;
HostDeviceVector<uint32_t> categories;
size_t tree_beg_; // NOLINT
size_t tree_end_; // NOLINT
int num_group;
void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) {
dh::safe_cuda(hipSetDevice(gpu_id));
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
tree_segments = std::move(HostDeviceVector<size_t>({}, gpu_id));
auto& h_tree_segments = tree_segments.HostVector();
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
nodes = std::move(HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(),
gpu_id));
stats = std::move(HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(),
RTreeNodeStat(), gpu_id));
auto d_nodes = nodes.DevicePointer();
auto d_stats = stats.DevicePointer();
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
auto& src_stats = model.trees.at(tree_idx)->GetStats();
dh::safe_cuda(hipMemcpyAsync(
d_nodes + h_tree_segments[tree_idx - tree_begin], src_nodes.data(),
sizeof(RegTree::Node) * src_nodes.size(), hipMemcpyDefault));
dh::safe_cuda(hipMemcpyAsync(
d_stats + h_tree_segments[tree_idx - tree_begin], src_stats.data(),
sizeof(RTreeNodeStat) * src_stats.size(), hipMemcpyDefault));
}
tree_group = std::move(HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id));
auto& h_tree_group = tree_group.HostVector();
std::memcpy(h_tree_group.data(), model.tree_info.data(), sizeof(int) * model.tree_info.size());
// Initialize categorical splits.
split_types.SetDevice(gpu_id);
std::vector<FeatureType>& h_split_types = split_types.HostVector();
h_split_types.resize(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_st = model.trees.at(tree_idx)->GetSplitTypes();
std::copy(src_st.cbegin(), src_st.cend(),
h_split_types.begin() + h_tree_segments[tree_idx - tree_begin]);
}
categories = HostDeviceVector<uint32_t>({}, gpu_id);
categories_tree_segments = HostDeviceVector<uint32_t>(1, 0, gpu_id);
std::vector<uint32_t> &h_categories = categories.HostVector();
std::vector<uint32_t> &h_split_cat_segments = categories_tree_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_cats = model.trees.at(tree_idx)->GetSplitCategories();
size_t orig_size = h_categories.size();
h_categories.resize(orig_size + src_cats.size());
std::copy(src_cats.cbegin(), src_cats.cend(),
h_categories.begin() + orig_size);
h_split_cat_segments.push_back(h_categories.size());
}
categories_node_segments =
HostDeviceVector<RegTree::Segment>(h_tree_segments.back(), {}, gpu_id);
std::vector<RegTree::Segment> &h_categories_node_segments =
categories_node_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const &src_cats_ptr = model.trees.at(tree_idx)->GetSplitCategoriesPtr();
std::copy(src_cats_ptr.cbegin(), src_cats_ptr.cend(),
h_categories_node_segments.begin() +
h_tree_segments[tree_idx - tree_begin]);
}
this->tree_beg_ = tree_begin;
this->tree_end_ = tree_end;
this->num_group = model.learner_model_param->num_output_group;
}
};
struct ShapSplitCondition {
ShapSplitCondition() = default;
XGBOOST_DEVICE
ShapSplitCondition(float feature_lower_bound, float feature_upper_bound,
bool is_missing_branch, common::CatBitField cats)
: feature_lower_bound(feature_lower_bound),
feature_upper_bound(feature_upper_bound),
is_missing_branch(is_missing_branch), categories{std::move(cats)} {
assert(feature_lower_bound <= feature_upper_bound);
}
/*! Feature values >= lower and < upper flow down this path. */
float feature_lower_bound;
float feature_upper_bound;
/*! Feature value set to true flow down this path. */
common::CatBitField categories;
/*! Do missing values flow down this path? */
bool is_missing_branch;
// Does this instance flow down this path?
XGBOOST_DEVICE bool EvaluateSplit(float x) const {
// is nan
if (isnan(x)) {
return is_missing_branch;
}
if (categories.Size() != 0) {
auto cat = static_cast<uint32_t>(x);
return categories.Check(cat);
} else {
return x >= feature_lower_bound && x < feature_upper_bound;
}
}
  // the &= op in bitfield is per CUDA thread, this one loops over the entire
// bitfield.
XGBOOST_DEVICE static common::CatBitField Intersect(common::CatBitField l,
common::CatBitField r) {
if (l.Data() == r.Data()) {
return l;
}
if (l.Size() > r.Size()) {
thrust::swap(l, r);
}
for (size_t i = 0; i < r.Bits().size(); ++i) {
l.Bits()[i] &= r.Bits()[i];
}
return l;
}
// Combine two split conditions on the same feature
XGBOOST_DEVICE void Merge(ShapSplitCondition other) {
// Combine duplicate features
if (categories.Size() != 0 || other.categories.Size() != 0) {
categories = Intersect(categories, other.categories);
} else {
feature_lower_bound = max(feature_lower_bound, other.feature_lower_bound);
feature_upper_bound = min(feature_upper_bound, other.feature_upper_bound);
}
is_missing_branch = is_missing_branch && other.is_missing_branch;
}
};
struct PathInfo {
int64_t leaf_position; // -1 not a leaf
size_t length;
size_t tree_idx;
};
// Transform model into path element form for GPUTreeShap
void ExtractPaths(
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>> *paths,
DeviceModel *model, dh::device_vector<uint32_t> *path_categories,
int gpu_id) {
dh::safe_cuda(hipSetDevice(gpu_id));
auto& device_model = *model;
dh::caching_device_vector<PathInfo> info(device_model.nodes.Size());
dh::XGBCachingDeviceAllocator<PathInfo> alloc;
auto d_nodes = device_model.nodes.ConstDeviceSpan();
auto d_tree_segments = device_model.tree_segments.ConstDeviceSpan();
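  // For every leaf node, record its index, the length of its root-to-leaf path, and its tree.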
auto nodes_transform = dh::MakeTransformIterator<PathInfo>(
thrust::make_counting_iterator(0ull), [=] __device__(size_t idx) {
auto n = d_nodes[idx];
if (!n.IsLeaf() || n.IsDeleted()) {
return PathInfo{-1, 0, 0};
}
size_t tree_idx =
dh::SegmentId(d_tree_segments.begin(), d_tree_segments.end(), idx);
size_t tree_offset = d_tree_segments[tree_idx];
size_t path_length = 1;
while (!n.IsRoot()) {
n = d_nodes[n.Parent() + tree_offset];
path_length++;
}
return PathInfo{static_cast<int64_t>(idx), path_length, tree_idx};
});
auto end = thrust::copy_if(
thrust::hip::par(alloc), nodes_transform,
nodes_transform + d_nodes.size(), info.begin(),
[=] __device__(const PathInfo& e) { return e.leaf_position != -1; });
info.resize(end - info.begin());
auto length_iterator = dh::MakeTransformIterator<size_t>(
info.begin(),
[=] __device__(const PathInfo& info) { return info.length; });
dh::caching_device_vector<size_t> path_segments(info.size() + 1);
thrust::exclusive_scan(thrust::hip::par(alloc), length_iterator,
length_iterator + info.size() + 1,
path_segments.begin());
paths->resize(path_segments.back());
auto d_paths = dh::ToSpan(*paths);
auto d_info = info.data().get();
auto d_stats = device_model.stats.ConstDeviceSpan();
auto d_tree_group = device_model.tree_group.ConstDeviceSpan();
auto d_path_segments = path_segments.data().get();
auto d_split_types = device_model.split_types.ConstDeviceSpan();
auto d_cat_segments = device_model.categories_tree_segments.ConstDeviceSpan();
auto d_cat_node_segments = device_model.categories_node_segments.ConstDeviceSpan();
size_t max_cat = 0;
if (thrust::any_of(dh::tbegin(d_split_types), dh::tend(d_split_types),
common::IsCatOp{})) {
dh::PinnedMemory pinned;
auto h_max_cat = pinned.GetSpan<RegTree::Segment>(1);
auto max_elem_it = dh::MakeTransformIterator<size_t>(
dh::tbegin(d_cat_node_segments),
[] __device__(RegTree::Segment seg) { return seg.size; });
size_t max_cat_it =
thrust::max_element(thrust::device, max_elem_it,
max_elem_it + d_cat_node_segments.size()) -
max_elem_it;
dh::safe_cuda(hipMemcpy(h_max_cat.data(),
d_cat_node_segments.data() + max_cat_it,
h_max_cat.size_bytes(), hipMemcpyDeviceToHost));
max_cat = h_max_cat[0].size;
CHECK_GE(max_cat, 1);
path_categories->resize(max_cat * paths->size());
}
auto d_model_categories = device_model.categories.DeviceSpan();
common::Span<uint32_t> d_path_categories = dh::ToSpan(*path_categories);
dh::LaunchN(info.size(), [=] __device__(size_t idx) {
auto path_info = d_info[idx];
size_t tree_offset = d_tree_segments[path_info.tree_idx];
TreeView tree{0, path_info.tree_idx, d_nodes,
d_tree_segments, d_split_types, d_cat_segments,
d_cat_node_segments, d_model_categories};
int group = d_tree_group[path_info.tree_idx];
size_t child_idx = path_info.leaf_position;
auto child = d_nodes[child_idx];
float v = child.LeafValue();
const float inf = std::numeric_limits<float>::infinity();
size_t output_position = d_path_segments[idx + 1] - 1;
while (!child.IsRoot()) {
size_t parent_idx = tree_offset + child.Parent();
double child_cover = d_stats[child_idx].sum_hess;
double parent_cover = d_stats[parent_idx].sum_hess;
double zero_fraction = child_cover / parent_cover;
auto parent = tree.d_tree[child.Parent()];
bool is_left_path = (tree_offset + parent.LeftChild()) == child_idx;
bool is_missing_path = (!parent.DefaultLeft() && !is_left_path) ||
(parent.DefaultLeft() && is_left_path);
float lower_bound = -inf;
float upper_bound = inf;
common::CatBitField bits;
if (common::IsCat(tree.cats.split_type, child.Parent())) {
auto path_cats = d_path_categories.subspan(max_cat * output_position, max_cat);
size_t size = tree.cats.node_ptr[child.Parent()].size;
auto node_cats = tree.cats.categories.subspan(tree.cats.node_ptr[child.Parent()].beg, size);
SPAN_CHECK(path_cats.size() >= node_cats.size());
for (size_t i = 0; i < node_cats.size(); ++i) {
path_cats[i] = is_left_path ? ~node_cats[i] : node_cats[i];
}
bits = common::CatBitField{path_cats};
} else {
lower_bound = is_left_path ? -inf : parent.SplitCond();
upper_bound = is_left_path ? parent.SplitCond() : inf;
}
d_paths[output_position--] =
gpu_treeshap::PathElement<ShapSplitCondition>{
idx, parent.SplitIndex(),
group, ShapSplitCondition{lower_bound, upper_bound, is_missing_path, bits},
zero_fraction, v};
child_idx = parent_idx;
child = parent;
}
// Root node has feature -1
d_paths[output_position] = {idx, -1, group, ShapSplitCondition{-inf, inf, false, {}}, 1.0, v};
});
}
namespace {
template <size_t kBlockThreads>
size_t SharedMemoryBytes(size_t cols, size_t max_shared_memory_bytes) {
  // max_shared_memory_bytes should never be 0 here.
CHECK_GT(max_shared_memory_bytes, 0);
size_t shared_memory_bytes =
static_cast<size_t>(sizeof(float) * cols * kBlockThreads);
if (shared_memory_bytes > max_shared_memory_bytes) {
shared_memory_bytes = 0;
}
return shared_memory_bytes;
}
} // anonymous namespace
class GPUPredictor : public xgboost::Predictor {
private:
void PredictInternal(const SparsePage& batch,
DeviceModel const& model,
size_t num_features,
HostDeviceVector<bst_float>* predictions,
size_t batch_offset, bool is_dense) const {
batch.offset.SetDevice(ctx_->gpu_id);
batch.data.SetDevice(ctx_->gpu_id);
const uint32_t BLOCK_THREADS = 128;
size_t num_rows = batch.Size();
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
auto max_shared_memory_bytes = ConfigureDevice(ctx_->gpu_id);
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(num_features, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
SparsePageView data(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
num_features);
auto const kernel = [&](auto predict_fn) {
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
predict_fn, data, model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
model.tree_segments.ConstDeviceSpan(),
model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(),
model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
num_features, num_rows, entry_start, use_shared, model.num_group,
nan(""));
};
if (is_dense) {
kernel(PredictKernel<SparsePageLoader, SparsePageView, false>);
} else {
kernel(PredictKernel<SparsePageLoader, SparsePageView, true>);
}
}
void PredictInternal(EllpackDeviceAccessor const& batch,
DeviceModel const& model,
HostDeviceVector<bst_float>* out_preds,
size_t batch_offset) const {
const uint32_t BLOCK_THREADS = 256;
size_t num_rows = batch.n_rows;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
DeviceModel d_model;
bool use_shared = false;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} (
PredictKernel<EllpackLoader, EllpackDeviceAccessor>, batch,
model.nodes.ConstDeviceSpan(), out_preds->DeviceSpan().subspan(batch_offset),
model.tree_segments.ConstDeviceSpan(), model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(),
model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
batch.NumFeatures(), num_rows, entry_start, use_shared,
model.num_group, nan(""));
}
void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) const {
if (tree_end - tree_begin == 0) {
return;
}
out_preds->SetDevice(ctx_->gpu_id);
auto const& info = dmat->Info();
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, ctx_->gpu_id);
if (dmat->PageExists<SparsePage>()) {
size_t batch_offset = 0;
for (auto &batch : dmat->GetBatches<SparsePage>()) {
this->PredictInternal(batch, d_model, model.learner_model_param->num_feature,
out_preds, batch_offset, dmat->IsDense());
batch_offset += batch.Size() * model.learner_model_param->num_output_group;
}
} else {
size_t batch_offset = 0;
for (auto const& page : dmat->GetBatches<EllpackPage>(BatchParam{})) {
dmat->Info().feature_types.SetDevice(ctx_->gpu_id);
auto feature_types = dmat->Info().feature_types.ConstDeviceSpan();
this->PredictInternal(
page.Impl()->GetDeviceAccessor(ctx_->gpu_id, feature_types),
d_model,
out_preds,
batch_offset);
batch_offset += page.Impl()->n_rows;
}
}
}
public:
explicit GPUPredictor(GenericParameter const* generic_param) :
Predictor::Predictor{generic_param} {}
~GPUPredictor() override {
if (ctx_->gpu_id >= 0 && ctx_->gpu_id < common::AllVisibleGPUs()) {
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
}
}
void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts,
const gbm::GBTreeModel& model, uint32_t tree_begin,
uint32_t tree_end = 0) const override {
int device = ctx_->gpu_id;
CHECK_GE(device, 0) << "Set `gpu_id' to positive value for processing GPU data.";
auto* out_preds = &predts->predictions;
if (tree_end == 0) {
tree_end = model.trees.size();
}
this->DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
template <typename Adapter, typename Loader>
void DispatchedInplacePredict(dmlc::any const &x, std::shared_ptr<DMatrix> p_m,
const gbm::GBTreeModel &model, float missing,
PredictionCacheEntry *out_preds,
uint32_t tree_begin, uint32_t tree_end) const {
uint32_t const output_groups = model.learner_model_param->num_output_group;
auto m = dmlc::get<std::shared_ptr<Adapter>>(x);
CHECK_EQ(m->NumColumns(), model.learner_model_param->num_feature)
<< "Number of columns in data must equal to trained model.";
CHECK_EQ(dh::CurrentDevice(), m->DeviceIdx())
<< "XGBoost is running on device: " << this->ctx_->gpu_id << ", "
<< "but data is on: " << m->DeviceIdx();
if (p_m) {
p_m->Info().num_row_ = m->NumRows();
this->InitOutPredictions(p_m->Info(), &(out_preds->predictions), model);
} else {
MetaInfo info;
info.num_row_ = m->NumRows();
this->InitOutPredictions(info, &(out_preds->predictions), model);
}
out_preds->predictions.SetDevice(m->DeviceIdx());
const uint32_t BLOCK_THREADS = 128;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(m->NumRows(), BLOCK_THREADS));
auto max_shared_memory_bytes = dh::MaxSharedMemory(m->DeviceIdx());
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(m->NumColumns(), max_shared_memory_bytes);
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, m->DeviceIdx());
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
PredictKernel<Loader, typename Loader::BatchT>, m->Value(),
d_model.nodes.ConstDeviceSpan(), out_preds->predictions.DeviceSpan(),
d_model.tree_segments.ConstDeviceSpan(), d_model.tree_group.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(), tree_begin, tree_end, m->NumColumns(),
m->NumRows(), entry_start, use_shared, output_groups, missing);
}
bool InplacePredict(std::shared_ptr<DMatrix> p_m, const gbm::GBTreeModel& model, float missing,
PredictionCacheEntry* out_preds, uint32_t tree_begin,
unsigned tree_end) const override {
auto proxy = dynamic_cast<data::DMatrixProxy*>(p_m.get());
    CHECK(proxy) << "Inplace predict accepts only DMatrixProxy as input.";
auto x = proxy->Adapter();
if (x.type() == typeid(std::shared_ptr<data::CupyAdapter>)) {
this->DispatchedInplacePredict<data::CupyAdapter,
DeviceAdapterLoader<data::CupyAdapterBatch>>(
x, p_m, model, missing, out_preds, tree_begin, tree_end);
} else if (x.type() == typeid(std::shared_ptr<data::CudfAdapter>)) {
this->DispatchedInplacePredict<data::CudfAdapter,
DeviceAdapterLoader<data::CudfAdapterBatch>>(
x, p_m, model, missing, out_preds, tree_begin, tree_end);
} else {
return false;
}
return true;
}
void PredictContribution(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned tree_end,
std::vector<bst_float> const* tree_weights,
bool approximate, int,
unsigned) const override {
std::string not_implemented{"contribution is not implemented in GPU "
"predictor, use `cpu_predictor` instead."};
if (approximate) {
LOG(FATAL) << "Approximated " << not_implemented;
}
if (tree_weights != nullptr) {
LOG(FATAL) << "Dart booster feature " << not_implemented;
}
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
out_contribs->SetDevice(ctx_->gpu_id);
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias) times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>>
device_paths;
DeviceModel d_model;
d_model.Init(model, 0, tree_end, ctx_->gpu_id);
dh::device_vector<uint32_t> categories;
ExtractPaths(&device_paths, &d_model, &categories, ctx_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(ctx_->gpu_id);
batch.offset.SetDevice(ctx_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
auto begin = dh::tbegin(phis) + batch.base_rowid * contributions_columns;
gpu_treeshap::GPUTreeShap<dh::XGBDeviceAllocator<int>>(
X, device_paths.begin(), device_paths.end(), ngroup, begin,
dh::tend(phis));
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(ctx_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.Data()->ConstDeviceSpan();
auto base_score = model.learner_model_param->BaseScore(ctx_);
dh::LaunchN(p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
phis[(idx + 1) * contributions_columns - 1] +=
margin.empty() ? base_score(0) : margin[idx];
});
}
void PredictInteractionContributions(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned tree_end,
std::vector<bst_float> const* tree_weights,
bool approximate) const override {
std::string not_implemented{"contribution is not implemented in GPU "
"predictor, use `cpu_predictor` instead."};
if (approximate) {
LOG(FATAL) << "Approximated " << not_implemented;
}
if (tree_weights != nullptr) {
LOG(FATAL) << "Dart booster feature " << not_implemented;
}
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
out_contribs->SetDevice(ctx_->gpu_id);
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias) times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>>
device_paths;
DeviceModel d_model;
d_model.Init(model, 0, tree_end, ctx_->gpu_id);
dh::device_vector<uint32_t> categories;
ExtractPaths(&device_paths, &d_model, &categories, ctx_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(ctx_->gpu_id);
batch.offset.SetDevice(ctx_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
auto begin = dh::tbegin(phis) + batch.base_rowid * contributions_columns;
gpu_treeshap::GPUTreeShapInteractions<dh::XGBDeviceAllocator<int>>(
X, device_paths.begin(), device_paths.end(), ngroup, begin,
dh::tend(phis));
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(ctx_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.Data()->ConstDeviceSpan();
auto base_score = model.learner_model_param->BaseScore(ctx_);
size_t n_features = model.learner_model_param->num_feature;
dh::LaunchN(p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
size_t group = idx % ngroup;
size_t row_idx = idx / ngroup;
phis[gpu_treeshap::IndexPhiInteractions(row_idx, ngroup, group, n_features,
n_features, n_features)] +=
margin.empty() ? base_score(0) : margin[idx];
});
}
void PredictInstance(const SparsePage::Inst&,
std::vector<bst_float>*,
const gbm::GBTreeModel&, unsigned) const override {
LOG(FATAL) << "[Internal error]: " << __func__
<< " is not implemented in GPU Predictor.";
}
void PredictLeaf(DMatrix *p_fmat, HostDeviceVector<bst_float> *predictions,
const gbm::GBTreeModel &model,
unsigned tree_end) const override {
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
auto max_shared_memory_bytes = ConfigureDevice(ctx_->gpu_id);
const MetaInfo& info = p_fmat->Info();
constexpr uint32_t kBlockThreads = 128;
size_t shared_memory_bytes = SharedMemoryBytes<kBlockThreads>(
info.num_col_, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
bst_feature_t num_features = info.num_col_;
bst_row_t num_rows = info.num_row_;
size_t entry_start = 0;
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
predictions->SetDevice(ctx_->gpu_id);
predictions->Resize(num_rows * tree_end);
DeviceModel d_model;
d_model.Init(model, 0, tree_end, this->ctx_->gpu_id);
if (p_fmat->PageExists<SparsePage>()) {
for (auto const& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(ctx_->gpu_id);
batch.offset.SetDevice(ctx_->gpu_id);
bst_row_t batch_offset = 0;
SparsePageView data{batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<SparsePageLoader, SparsePageView>, data,
d_model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
d_model.tree_segments.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(),
d_model.tree_beg_, d_model.tree_end_, num_features, num_rows,
entry_start, use_shared, nan(""));
batch_offset += batch.Size();
}
} else {
for (auto const& batch : p_fmat->GetBatches<EllpackPage>(BatchParam{})) {
bst_row_t batch_offset = 0;
EllpackDeviceAccessor data{batch.Impl()->GetDeviceAccessor(ctx_->gpu_id)};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<EllpackLoader, EllpackDeviceAccessor>, data,
d_model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
d_model.tree_segments.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(),
d_model.tree_beg_, d_model.tree_end_, num_features, num_rows,
entry_start, use_shared, nan(""));
batch_offset += batch.Size();
}
}
}
void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override {
Predictor::Configure(cfg);
}
private:
/*! \brief Reconfigure the device when GPU is changed. */
static size_t ConfigureDevice(int device) {
if (device >= 0) {
return dh::MaxSharedMemory(device);
}
return 0;
}
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([](GenericParameter const* generic_param) {
return new GPUPredictor(generic_param);
});
} // namespace predictor
} // namespace xgboost
| 88a7d56ca91bd3a85c6c26112cefb0d4bdb9edf8.cu | /*!
* Copyright 2017-2021 by Contributors
*/
#include <GPUTreeShap/gpu_treeshap.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <memory>
#include "../common/bitfield.h"
#include "../common/categorical.h"
#include "../common/common.h"
#include "../common/device_helpers.cuh"
#include "../data/device_adapter.cuh"
#include "../data/ellpack_page.cuh"
#include "../data/proxy_dmatrix.h"
#include "../gbm/gbtree_model.h"
#include "predict_fn.h"
#include "xgboost/data.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/predictor.h"
#include "xgboost/tree_model.h"
#include "xgboost/tree_updater.h"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
struct TreeView {
RegTree::CategoricalSplitMatrix cats;
common::Span<RegTree::Node const> d_tree;
XGBOOST_DEVICE
TreeView(size_t tree_begin, size_t tree_idx,
common::Span<const RegTree::Node> d_nodes,
common::Span<size_t const> d_tree_segments,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories) {
auto begin = d_tree_segments[tree_idx - tree_begin];
auto n_nodes = d_tree_segments[tree_idx - tree_begin + 1] -
d_tree_segments[tree_idx - tree_begin];
d_tree = d_nodes.subspan(begin, n_nodes);
auto tree_cat_ptrs = d_cat_node_segments.subspan(begin, n_nodes);
auto tree_split_types = d_tree_split_types.subspan(begin, n_nodes);
auto tree_categories =
d_categories.subspan(d_cat_tree_segments[tree_idx - tree_begin],
d_cat_tree_segments[tree_idx - tree_begin + 1] -
d_cat_tree_segments[tree_idx - tree_begin]);
cats.split_type = tree_split_types;
cats.categories = tree_categories;
cats.node_ptr = tree_cat_ptrs;
}
__device__ bool HasCategoricalSplit() const {
return !cats.categories.empty();
}
};
struct SparsePageView {
common::Span<const Entry> d_data;
common::Span<const bst_row_t> d_row_ptr;
bst_feature_t num_features;
SparsePageView() = default;
XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data,
common::Span<const bst_row_t> row_ptr,
bst_feature_t num_features)
: d_data{data}, d_row_ptr{row_ptr}, num_features(num_features) {}
__device__ float GetElement(size_t ridx, size_t fidx) const {
// Binary search
auto begin_ptr = d_data.begin() + d_row_ptr[ridx];
auto end_ptr = d_data.begin() + d_row_ptr[ridx + 1];
if (end_ptr - begin_ptr == this->NumCols()) {
// Bypass span check for dense data
return d_data.data()[d_row_ptr[ridx] + fidx].fvalue;
}
common::Span<const Entry>::iterator previous_middle;
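    // Stop once the search interval no longer shrinks; the feature is then treated as missing.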
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
XGBOOST_DEVICE size_t NumRows() const { return d_row_ptr.size() - 1; }
XGBOOST_DEVICE size_t NumCols() const { return num_features; }
};
struct SparsePageLoader {
bool use_shared;
SparsePageView data;
float* smem;
size_t entry_start;
__device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features,
bst_row_t num_rows, size_t entry_start, float)
: use_shared(use_shared),
data(data),
entry_start(entry_start) {
extern __shared__ float _smem[];
smem = _smem;
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * data.num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = data.d_row_ptr[global_idx];
bst_uint elem_end = data.d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = data.d_data[elem_idx - entry_start];
smem[threadIdx.x * data.num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * data.num_features + fidx];
} else {
return data.GetElement(ridx, fidx);
}
}
};
struct EllpackLoader {
EllpackDeviceAccessor const& matrix;
XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool, bst_feature_t, bst_row_t,
size_t, float)
: matrix{m} {}
__device__ __forceinline__ float GetElement(size_t ridx, size_t fidx) const {
auto gidx = matrix.GetBinIndex(ridx, fidx);
if (gidx == -1) {
return nan("");
}
if (common::IsCat(matrix.feature_types, fidx)) {
return matrix.gidx_fvalue_map[gidx];
}
// The gradient index needs to be shifted by one as min values are not included in the
// cuts.
if (gidx == matrix.feature_segments[fidx]) {
return matrix.min_fvalue[fidx];
}
return matrix.gidx_fvalue_map[gidx - 1];
}
};
template <typename Batch>
struct DeviceAdapterLoader {
Batch batch;
bst_feature_t columns;
float* smem;
bool use_shared;
data::IsValidFunctor is_valid;
using BatchT = Batch;
XGBOOST_DEV_INLINE DeviceAdapterLoader(Batch const batch, bool use_shared,
bst_feature_t num_features, bst_row_t num_rows,
size_t entry_start, float missing) :
batch{batch},
columns{num_features},
use_shared{use_shared},
is_valid{missing} {
extern __shared__ float _smem[];
smem = _smem;
if (use_shared) {
uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
auto beg = global_idx * columns;
auto end = (global_idx + 1) * columns;
for (size_t i = beg; i < end; ++i) {
auto value = batch.GetElement(i).value;
if (is_valid(value)) {
smem[threadIdx.x * num_features + (i - beg)] = value;
}
}
}
}
__syncthreads();
}
XGBOOST_DEV_INLINE float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * columns + fidx];
}
auto value = batch.GetElement(ridx * columns + fidx).value;
if (is_valid(value)) {
return value;
} else {
return nan("");
}
}
};
template <bool has_missing, bool has_categorical, typename Loader>
__device__ bst_node_t GetLeafIndex(bst_row_t ridx, TreeView const &tree,
Loader *loader) {
bst_node_t nidx = 0;
RegTree::Node n = tree.d_tree[nidx];
while (!n.IsLeaf()) {
float fvalue = loader->GetElement(ridx, n.SplitIndex());
bool is_missing = common::CheckNAN(fvalue);
nidx = GetNextNode<has_missing, has_categorical>(n, nidx, fvalue,
is_missing, tree.cats);
n = tree.d_tree[nidx];
}
return nidx;
}
template <bool has_missing, typename Loader>
__device__ float GetLeafWeight(bst_row_t ridx, TreeView const &tree,
Loader *loader) {
bst_node_t nidx = -1;
if (tree.HasCategoricalSplit()) {
nidx = GetLeafIndex<has_missing, true>(ridx, tree, loader);
} else {
nidx = GetLeafIndex<has_missing, false>(ridx, tree, loader);
}
return tree.d_tree[nidx].LeafValue();
}
template <typename Loader, typename Data>
__global__ void
PredictLeafKernel(Data data, common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories,
size_t tree_begin, size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start, bool use_shared,
float missing) {
bst_row_t ridx = blockDim.x * blockIdx.x + threadIdx.x;
if (ridx >= num_rows) {
return;
}
Loader loader(data, use_shared, num_features, num_rows, entry_start, missing);
for (size_t tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
bst_node_t leaf = -1;
if (d_tree.HasCategoricalSplit()) {
leaf = GetLeafIndex<true, true>(ridx, d_tree, &loader);
} else {
leaf = GetLeafIndex<true, false>(ridx, d_tree, &loader);
}
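    // Store one leaf index per (row, tree) pair, laid out row-major.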
d_out_predictions[ridx * (tree_end - tree_begin) + tree_idx] = leaf;
}
}
template <typename Loader, typename Data, bool has_missing = true>
__global__ void
PredictKernel(Data data, common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
common::Span<int const> d_tree_group,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories, size_t tree_begin,
size_t tree_end, size_t num_features, size_t num_rows,
size_t entry_start, bool use_shared, int num_group, float missing) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
Loader loader(data, use_shared, num_features, num_rows, entry_start, missing);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (size_t tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
float leaf = GetLeafWeight<has_missing>(global_idx, d_tree, &loader);
sum += leaf;
}
d_out_predictions[global_idx] += sum;
} else {
for (size_t tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] +=
GetLeafWeight<has_missing>(global_idx, d_tree, &loader);
}
}
}
class DeviceModel {
public:
// Need to lazily construct the vectors because GPU id is only known at runtime
HostDeviceVector<RTreeNodeStat> stats;
HostDeviceVector<size_t> tree_segments;
HostDeviceVector<RegTree::Node> nodes;
HostDeviceVector<int> tree_group;
HostDeviceVector<FeatureType> split_types;
  // Pointer to each tree, segmenting the categories array.
HostDeviceVector<uint32_t> categories_tree_segments;
// Pointer to each node, segmenting categories array.
HostDeviceVector<RegTree::Segment> categories_node_segments;
HostDeviceVector<uint32_t> categories;
size_t tree_beg_; // NOLINT
size_t tree_end_; // NOLINT
int num_group;
void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) {
dh::safe_cuda(cudaSetDevice(gpu_id));
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
tree_segments = std::move(HostDeviceVector<size_t>({}, gpu_id));
auto& h_tree_segments = tree_segments.HostVector();
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
nodes = std::move(HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(),
gpu_id));
stats = std::move(HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(),
RTreeNodeStat(), gpu_id));
auto d_nodes = nodes.DevicePointer();
auto d_stats = stats.DevicePointer();
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
auto& src_stats = model.trees.at(tree_idx)->GetStats();
dh::safe_cuda(cudaMemcpyAsync(
d_nodes + h_tree_segments[tree_idx - tree_begin], src_nodes.data(),
sizeof(RegTree::Node) * src_nodes.size(), cudaMemcpyDefault));
dh::safe_cuda(cudaMemcpyAsync(
d_stats + h_tree_segments[tree_idx - tree_begin], src_stats.data(),
sizeof(RTreeNodeStat) * src_stats.size(), cudaMemcpyDefault));
}
tree_group = std::move(HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id));
auto& h_tree_group = tree_group.HostVector();
std::memcpy(h_tree_group.data(), model.tree_info.data(), sizeof(int) * model.tree_info.size());
// Initialize categorical splits.
split_types.SetDevice(gpu_id);
std::vector<FeatureType>& h_split_types = split_types.HostVector();
h_split_types.resize(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_st = model.trees.at(tree_idx)->GetSplitTypes();
std::copy(src_st.cbegin(), src_st.cend(),
h_split_types.begin() + h_tree_segments[tree_idx - tree_begin]);
}
categories = HostDeviceVector<uint32_t>({}, gpu_id);
categories_tree_segments = HostDeviceVector<uint32_t>(1, 0, gpu_id);
std::vector<uint32_t> &h_categories = categories.HostVector();
std::vector<uint32_t> &h_split_cat_segments = categories_tree_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_cats = model.trees.at(tree_idx)->GetSplitCategories();
size_t orig_size = h_categories.size();
h_categories.resize(orig_size + src_cats.size());
std::copy(src_cats.cbegin(), src_cats.cend(),
h_categories.begin() + orig_size);
h_split_cat_segments.push_back(h_categories.size());
}
categories_node_segments =
HostDeviceVector<RegTree::Segment>(h_tree_segments.back(), {}, gpu_id);
std::vector<RegTree::Segment> &h_categories_node_segments =
categories_node_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const &src_cats_ptr = model.trees.at(tree_idx)->GetSplitCategoriesPtr();
std::copy(src_cats_ptr.cbegin(), src_cats_ptr.cend(),
h_categories_node_segments.begin() +
h_tree_segments[tree_idx - tree_begin]);
}
this->tree_beg_ = tree_begin;
this->tree_end_ = tree_end;
this->num_group = model.learner_model_param->num_output_group;
}
};
struct ShapSplitCondition {
ShapSplitCondition() = default;
XGBOOST_DEVICE
ShapSplitCondition(float feature_lower_bound, float feature_upper_bound,
bool is_missing_branch, common::CatBitField cats)
: feature_lower_bound(feature_lower_bound),
feature_upper_bound(feature_upper_bound),
is_missing_branch(is_missing_branch), categories{std::move(cats)} {
assert(feature_lower_bound <= feature_upper_bound);
}
/*! Feature values >= lower and < upper flow down this path. */
float feature_lower_bound;
float feature_upper_bound;
  /*! Categories whose corresponding bit is set flow down this path. */
common::CatBitField categories;
/*! Do missing values flow down this path? */
bool is_missing_branch;
// Does this instance flow down this path?
XGBOOST_DEVICE bool EvaluateSplit(float x) const {
// is nan
if (isnan(x)) {
return is_missing_branch;
}
if (categories.Size() != 0) {
auto cat = static_cast<uint32_t>(x);
return categories.Check(cat);
} else {
return x >= feature_lower_bound && x < feature_upper_bound;
}
}
  // The &= op in CatBitField works per CUDA thread; this one loops over the
  // entire bitfield.
XGBOOST_DEVICE static common::CatBitField Intersect(common::CatBitField l,
common::CatBitField r) {
if (l.Data() == r.Data()) {
return l;
}
if (l.Size() > r.Size()) {
thrust::swap(l, r);
}
for (size_t i = 0; i < r.Bits().size(); ++i) {
l.Bits()[i] &= r.Bits()[i];
}
return l;
}
// Combine two split conditions on the same feature
XGBOOST_DEVICE void Merge(ShapSplitCondition other) {
// Combine duplicate features
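    // Categorical conditions intersect their bit sets; numerical ones intersect their bounds.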
if (categories.Size() != 0 || other.categories.Size() != 0) {
categories = Intersect(categories, other.categories);
} else {
feature_lower_bound = max(feature_lower_bound, other.feature_lower_bound);
feature_upper_bound = min(feature_upper_bound, other.feature_upper_bound);
}
is_missing_branch = is_missing_branch && other.is_missing_branch;
}
};
struct PathInfo {
int64_t leaf_position; // -1 not a leaf
size_t length;
size_t tree_idx;
};
// Transform model into path element form for GPUTreeShap
void ExtractPaths(
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>> *paths,
DeviceModel *model, dh::device_vector<uint32_t> *path_categories,
int gpu_id) {
dh::safe_cuda(cudaSetDevice(gpu_id));
auto& device_model = *model;
dh::caching_device_vector<PathInfo> info(device_model.nodes.Size());
dh::XGBCachingDeviceAllocator<PathInfo> alloc;
auto d_nodes = device_model.nodes.ConstDeviceSpan();
auto d_tree_segments = device_model.tree_segments.ConstDeviceSpan();
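  // For every leaf, record its node index, owning tree and root-to-leaf path length.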
auto nodes_transform = dh::MakeTransformIterator<PathInfo>(
thrust::make_counting_iterator(0ull), [=] __device__(size_t idx) {
auto n = d_nodes[idx];
if (!n.IsLeaf() || n.IsDeleted()) {
return PathInfo{-1, 0, 0};
}
size_t tree_idx =
dh::SegmentId(d_tree_segments.begin(), d_tree_segments.end(), idx);
size_t tree_offset = d_tree_segments[tree_idx];
size_t path_length = 1;
while (!n.IsRoot()) {
n = d_nodes[n.Parent() + tree_offset];
path_length++;
}
return PathInfo{static_cast<int64_t>(idx), path_length, tree_idx};
});
auto end = thrust::copy_if(
thrust::cuda::par(alloc), nodes_transform,
nodes_transform + d_nodes.size(), info.begin(),
[=] __device__(const PathInfo& e) { return e.leaf_position != -1; });
info.resize(end - info.begin());
auto length_iterator = dh::MakeTransformIterator<size_t>(
info.begin(),
[=] __device__(const PathInfo& info) { return info.length; });
dh::caching_device_vector<size_t> path_segments(info.size() + 1);
thrust::exclusive_scan(thrust::cuda::par(alloc), length_iterator,
length_iterator + info.size() + 1,
path_segments.begin());
paths->resize(path_segments.back());
auto d_paths = dh::ToSpan(*paths);
auto d_info = info.data().get();
auto d_stats = device_model.stats.ConstDeviceSpan();
auto d_tree_group = device_model.tree_group.ConstDeviceSpan();
auto d_path_segments = path_segments.data().get();
auto d_split_types = device_model.split_types.ConstDeviceSpan();
auto d_cat_segments = device_model.categories_tree_segments.ConstDeviceSpan();
auto d_cat_node_segments = device_model.categories_node_segments.ConstDeviceSpan();
size_t max_cat = 0;
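  // Size the per-path category storage by the largest category set found on any node.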
if (thrust::any_of(dh::tbegin(d_split_types), dh::tend(d_split_types),
common::IsCatOp{})) {
dh::PinnedMemory pinned;
auto h_max_cat = pinned.GetSpan<RegTree::Segment>(1);
auto max_elem_it = dh::MakeTransformIterator<size_t>(
dh::tbegin(d_cat_node_segments),
[] __device__(RegTree::Segment seg) { return seg.size; });
size_t max_cat_it =
thrust::max_element(thrust::device, max_elem_it,
max_elem_it + d_cat_node_segments.size()) -
max_elem_it;
dh::safe_cuda(cudaMemcpy(h_max_cat.data(),
d_cat_node_segments.data() + max_cat_it,
h_max_cat.size_bytes(), cudaMemcpyDeviceToHost));
max_cat = h_max_cat[0].size;
CHECK_GE(max_cat, 1);
path_categories->resize(max_cat * paths->size());
}
auto d_model_categories = device_model.categories.DeviceSpan();
common::Span<uint32_t> d_path_categories = dh::ToSpan(*path_categories);
dh::LaunchN(info.size(), [=] __device__(size_t idx) {
auto path_info = d_info[idx];
size_t tree_offset = d_tree_segments[path_info.tree_idx];
TreeView tree{0, path_info.tree_idx, d_nodes,
d_tree_segments, d_split_types, d_cat_segments,
d_cat_node_segments, d_model_categories};
int group = d_tree_group[path_info.tree_idx];
size_t child_idx = path_info.leaf_position;
auto child = d_nodes[child_idx];
float v = child.LeafValue();
const float inf = std::numeric_limits<float>::infinity();
size_t output_position = d_path_segments[idx + 1] - 1;
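    // Walk from the leaf up to the root, filling the path back to front.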
while (!child.IsRoot()) {
size_t parent_idx = tree_offset + child.Parent();
double child_cover = d_stats[child_idx].sum_hess;
double parent_cover = d_stats[parent_idx].sum_hess;
double zero_fraction = child_cover / parent_cover;
auto parent = tree.d_tree[child.Parent()];
bool is_left_path = (tree_offset + parent.LeftChild()) == child_idx;
bool is_missing_path = (!parent.DefaultLeft() && !is_left_path) ||
(parent.DefaultLeft() && is_left_path);
float lower_bound = -inf;
float upper_bound = inf;
common::CatBitField bits;
if (common::IsCat(tree.cats.split_type, child.Parent())) {
auto path_cats = d_path_categories.subspan(max_cat * output_position, max_cat);
size_t size = tree.cats.node_ptr[child.Parent()].size;
auto node_cats = tree.cats.categories.subspan(tree.cats.node_ptr[child.Parent()].beg, size);
SPAN_CHECK(path_cats.size() >= node_cats.size());
for (size_t i = 0; i < node_cats.size(); ++i) {
path_cats[i] = is_left_path ? ~node_cats[i] : node_cats[i];
}
bits = common::CatBitField{path_cats};
} else {
lower_bound = is_left_path ? -inf : parent.SplitCond();
upper_bound = is_left_path ? parent.SplitCond() : inf;
}
d_paths[output_position--] =
gpu_treeshap::PathElement<ShapSplitCondition>{
idx, parent.SplitIndex(),
group, ShapSplitCondition{lower_bound, upper_bound, is_missing_path, bits},
zero_fraction, v};
child_idx = parent_idx;
child = parent;
}
// Root node has feature -1
d_paths[output_position] = {idx, -1, group, ShapSplitCondition{-inf, inf, false, {}}, 1.0, v};
});
}
namespace {
template <size_t kBlockThreads>
size_t SharedMemoryBytes(size_t cols, size_t max_shared_memory_bytes) {
  // max_shared_memory_bytes can never be 0 here.
CHECK_GT(max_shared_memory_bytes, 0);
size_t shared_memory_bytes =
static_cast<size_t>(sizeof(float) * cols * kBlockThreads);
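  // A zero result tells callers to fall back to reading rows from global memory.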
if (shared_memory_bytes > max_shared_memory_bytes) {
shared_memory_bytes = 0;
}
return shared_memory_bytes;
}
} // anonymous namespace
class GPUPredictor : public xgboost::Predictor {
private:
void PredictInternal(const SparsePage& batch,
DeviceModel const& model,
size_t num_features,
HostDeviceVector<bst_float>* predictions,
size_t batch_offset, bool is_dense) const {
batch.offset.SetDevice(ctx_->gpu_id);
batch.data.SetDevice(ctx_->gpu_id);
const uint32_t BLOCK_THREADS = 128;
size_t num_rows = batch.Size();
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
auto max_shared_memory_bytes = ConfigureDevice(ctx_->gpu_id);
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(num_features, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
SparsePageView data(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
num_features);
auto const kernel = [&](auto predict_fn) {
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
predict_fn, data, model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
model.tree_segments.ConstDeviceSpan(),
model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(),
model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
num_features, num_rows, entry_start, use_shared, model.num_group,
nan(""));
};
if (is_dense) {
kernel(PredictKernel<SparsePageLoader, SparsePageView, false>);
} else {
kernel(PredictKernel<SparsePageLoader, SparsePageView, true>);
}
}
void PredictInternal(EllpackDeviceAccessor const& batch,
DeviceModel const& model,
HostDeviceVector<bst_float>* out_preds,
size_t batch_offset) const {
const uint32_t BLOCK_THREADS = 256;
size_t num_rows = batch.n_rows;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
DeviceModel d_model;
bool use_shared = false;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} (
PredictKernel<EllpackLoader, EllpackDeviceAccessor>, batch,
model.nodes.ConstDeviceSpan(), out_preds->DeviceSpan().subspan(batch_offset),
model.tree_segments.ConstDeviceSpan(), model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(),
model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
batch.NumFeatures(), num_rows, entry_start, use_shared,
model.num_group, nan(""));
}
void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) const {
if (tree_end - tree_begin == 0) {
return;
}
out_preds->SetDevice(ctx_->gpu_id);
auto const& info = dmat->Info();
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, ctx_->gpu_id);
if (dmat->PageExists<SparsePage>()) {
size_t batch_offset = 0;
for (auto &batch : dmat->GetBatches<SparsePage>()) {
this->PredictInternal(batch, d_model, model.learner_model_param->num_feature,
out_preds, batch_offset, dmat->IsDense());
batch_offset += batch.Size() * model.learner_model_param->num_output_group;
}
} else {
size_t batch_offset = 0;
for (auto const& page : dmat->GetBatches<EllpackPage>(BatchParam{})) {
dmat->Info().feature_types.SetDevice(ctx_->gpu_id);
auto feature_types = dmat->Info().feature_types.ConstDeviceSpan();
this->PredictInternal(
page.Impl()->GetDeviceAccessor(ctx_->gpu_id, feature_types),
d_model,
out_preds,
batch_offset);
batch_offset += page.Impl()->n_rows;
}
}
}
public:
explicit GPUPredictor(GenericParameter const* generic_param) :
Predictor::Predictor{generic_param} {}
~GPUPredictor() override {
if (ctx_->gpu_id >= 0 && ctx_->gpu_id < common::AllVisibleGPUs()) {
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
}
}
void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts,
const gbm::GBTreeModel& model, uint32_t tree_begin,
uint32_t tree_end = 0) const override {
int device = ctx_->gpu_id;
CHECK_GE(device, 0) << "Set `gpu_id' to positive value for processing GPU data.";
auto* out_preds = &predts->predictions;
if (tree_end == 0) {
tree_end = model.trees.size();
}
this->DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
template <typename Adapter, typename Loader>
void DispatchedInplacePredict(dmlc::any const &x, std::shared_ptr<DMatrix> p_m,
const gbm::GBTreeModel &model, float missing,
PredictionCacheEntry *out_preds,
uint32_t tree_begin, uint32_t tree_end) const {
uint32_t const output_groups = model.learner_model_param->num_output_group;
auto m = dmlc::get<std::shared_ptr<Adapter>>(x);
CHECK_EQ(m->NumColumns(), model.learner_model_param->num_feature)
<< "Number of columns in data must equal to trained model.";
CHECK_EQ(dh::CurrentDevice(), m->DeviceIdx())
<< "XGBoost is running on device: " << this->ctx_->gpu_id << ", "
<< "but data is on: " << m->DeviceIdx();
if (p_m) {
p_m->Info().num_row_ = m->NumRows();
this->InitOutPredictions(p_m->Info(), &(out_preds->predictions), model);
} else {
MetaInfo info;
info.num_row_ = m->NumRows();
this->InitOutPredictions(info, &(out_preds->predictions), model);
}
out_preds->predictions.SetDevice(m->DeviceIdx());
const uint32_t BLOCK_THREADS = 128;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(m->NumRows(), BLOCK_THREADS));
auto max_shared_memory_bytes = dh::MaxSharedMemory(m->DeviceIdx());
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(m->NumColumns(), max_shared_memory_bytes);
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, m->DeviceIdx());
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
PredictKernel<Loader, typename Loader::BatchT>, m->Value(),
d_model.nodes.ConstDeviceSpan(), out_preds->predictions.DeviceSpan(),
d_model.tree_segments.ConstDeviceSpan(), d_model.tree_group.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(), tree_begin, tree_end, m->NumColumns(),
m->NumRows(), entry_start, use_shared, output_groups, missing);
}
bool InplacePredict(std::shared_ptr<DMatrix> p_m, const gbm::GBTreeModel& model, float missing,
PredictionCacheEntry* out_preds, uint32_t tree_begin,
unsigned tree_end) const override {
auto proxy = dynamic_cast<data::DMatrixProxy*>(p_m.get());
    CHECK(proxy) << "Inplace predict accepts only DMatrixProxy as input.";
auto x = proxy->Adapter();
if (x.type() == typeid(std::shared_ptr<data::CupyAdapter>)) {
this->DispatchedInplacePredict<data::CupyAdapter,
DeviceAdapterLoader<data::CupyAdapterBatch>>(
x, p_m, model, missing, out_preds, tree_begin, tree_end);
} else if (x.type() == typeid(std::shared_ptr<data::CudfAdapter>)) {
this->DispatchedInplacePredict<data::CudfAdapter,
DeviceAdapterLoader<data::CudfAdapterBatch>>(
x, p_m, model, missing, out_preds, tree_begin, tree_end);
} else {
return false;
}
return true;
}
void PredictContribution(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned tree_end,
std::vector<bst_float> const* tree_weights,
bool approximate, int,
unsigned) const override {
std::string not_implemented{"contribution is not implemented in GPU "
"predictor, use `cpu_predictor` instead."};
if (approximate) {
LOG(FATAL) << "Approximated " << not_implemented;
}
if (tree_weights != nullptr) {
LOG(FATAL) << "Dart booster feature " << not_implemented;
}
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
out_contribs->SetDevice(ctx_->gpu_id);
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias) times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>>
device_paths;
DeviceModel d_model;
d_model.Init(model, 0, tree_end, ctx_->gpu_id);
dh::device_vector<uint32_t> categories;
ExtractPaths(&device_paths, &d_model, &categories, ctx_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(ctx_->gpu_id);
batch.offset.SetDevice(ctx_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
auto begin = dh::tbegin(phis) + batch.base_rowid * contributions_columns;
gpu_treeshap::GPUTreeShap<dh::XGBDeviceAllocator<int>>(
X, device_paths.begin(), device_paths.end(), ngroup, begin,
dh::tend(phis));
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(ctx_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.Data()->ConstDeviceSpan();
auto base_score = model.learner_model_param->BaseScore(ctx_);
dh::LaunchN(p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
phis[(idx + 1) * contributions_columns - 1] +=
margin.empty() ? base_score(0) : margin[idx];
});
}
void PredictInteractionContributions(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned tree_end,
std::vector<bst_float> const* tree_weights,
bool approximate) const override {
std::string not_implemented{"contribution is not implemented in GPU "
"predictor, use `cpu_predictor` instead."};
if (approximate) {
LOG(FATAL) << "Approximated " << not_implemented;
}
if (tree_weights != nullptr) {
LOG(FATAL) << "Dart booster feature " << not_implemented;
}
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
out_contribs->SetDevice(ctx_->gpu_id);
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias) times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>>
device_paths;
DeviceModel d_model;
d_model.Init(model, 0, tree_end, ctx_->gpu_id);
dh::device_vector<uint32_t> categories;
ExtractPaths(&device_paths, &d_model, &categories, ctx_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(ctx_->gpu_id);
batch.offset.SetDevice(ctx_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
auto begin = dh::tbegin(phis) + batch.base_rowid * contributions_columns;
gpu_treeshap::GPUTreeShapInteractions<dh::XGBDeviceAllocator<int>>(
X, device_paths.begin(), device_paths.end(), ngroup, begin,
dh::tend(phis));
}
    // Add the base margin term to the (bias, bias) entry of each interaction matrix
p_fmat->Info().base_margin_.SetDevice(ctx_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.Data()->ConstDeviceSpan();
auto base_score = model.learner_model_param->BaseScore(ctx_);
size_t n_features = model.learner_model_param->num_feature;
dh::LaunchN(p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
size_t group = idx % ngroup;
size_t row_idx = idx / ngroup;
phis[gpu_treeshap::IndexPhiInteractions(row_idx, ngroup, group, n_features,
n_features, n_features)] +=
margin.empty() ? base_score(0) : margin[idx];
});
}
void PredictInstance(const SparsePage::Inst&,
std::vector<bst_float>*,
const gbm::GBTreeModel&, unsigned) const override {
LOG(FATAL) << "[Internal error]: " << __func__
<< " is not implemented in GPU Predictor.";
}
void PredictLeaf(DMatrix *p_fmat, HostDeviceVector<bst_float> *predictions,
const gbm::GBTreeModel &model,
unsigned tree_end) const override {
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
auto max_shared_memory_bytes = ConfigureDevice(ctx_->gpu_id);
const MetaInfo& info = p_fmat->Info();
constexpr uint32_t kBlockThreads = 128;
size_t shared_memory_bytes = SharedMemoryBytes<kBlockThreads>(
info.num_col_, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
bst_feature_t num_features = info.num_col_;
bst_row_t num_rows = info.num_row_;
size_t entry_start = 0;
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
predictions->SetDevice(ctx_->gpu_id);
predictions->Resize(num_rows * tree_end);
DeviceModel d_model;
d_model.Init(model, 0, tree_end, this->ctx_->gpu_id);
if (p_fmat->PageExists<SparsePage>()) {
for (auto const& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(ctx_->gpu_id);
batch.offset.SetDevice(ctx_->gpu_id);
bst_row_t batch_offset = 0;
SparsePageView data{batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<SparsePageLoader, SparsePageView>, data,
d_model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
d_model.tree_segments.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(),
d_model.tree_beg_, d_model.tree_end_, num_features, num_rows,
entry_start, use_shared, nan(""));
batch_offset += batch.Size();
}
} else {
for (auto const& batch : p_fmat->GetBatches<EllpackPage>(BatchParam{})) {
bst_row_t batch_offset = 0;
EllpackDeviceAccessor data{batch.Impl()->GetDeviceAccessor(ctx_->gpu_id)};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<EllpackLoader, EllpackDeviceAccessor>, data,
d_model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
d_model.tree_segments.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(),
d_model.tree_beg_, d_model.tree_end_, num_features, num_rows,
entry_start, use_shared, nan(""));
batch_offset += batch.Size();
}
}
}
void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override {
Predictor::Configure(cfg);
}
private:
/*! \brief Reconfigure the device when GPU is changed. */
static size_t ConfigureDevice(int device) {
if (device >= 0) {
return dh::MaxSharedMemory(device);
}
return 0;
}
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([](GenericParameter const* generic_param) {
return new GPUPredictor(generic_param);
});
} // namespace predictor
} // namespace xgboost
|
f1c9a997f5929dac4049dd20185be20ae4dca5d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
**************************************************************************
* \file dct8x8_kernel2.cu
* \brief Contains 2nd kernel implementations of DCT and IDCT routines, used in
* JPEG internal data processing. Optimized device code.
*
* This code implements traditional approach to forward and inverse Discrete
* Cosine Transform to blocks of image pixels (of 8x8 size), as in JPEG standard.
* The data processing is done using floating point representation.
* The routine that performs quantization of coefficients can be found in
* dct8x8_kernel_quantization.cu file.
*/
#pragma once
#include "Common.h"
#define C_a 1.387039845322148f //!< a = (2^0.5) * cos( pi / 16); Used in forward and inverse DCT.
#define C_b 1.306562964876377f //!< b = (2^0.5) * cos( pi / 8); Used in forward and inverse DCT.
#define C_c 1.175875602419359f //!< c = (2^0.5) * cos(3 * pi / 16); Used in forward and inverse DCT.
#define C_d 0.785694958387102f //!< d = (2^0.5) * cos(5 * pi / 16); Used in forward and inverse DCT.
#define C_e 0.541196100146197f //!< e = (2^0.5) * cos(3 * pi / 8); Used in forward and inverse DCT.
#define C_f 0.275899379282943f //!< f = (2^0.5) * cos(7 * pi / 16); Used in forward and inverse DCT.
/**
* Normalization constant that is used in forward and inverse DCT
*/
#define C_norm 0.3535533905932737f // 1 / (8^0.5)
/**
* Width of data block (2nd kernel)
*/
#define KER2_BLOCK_WIDTH 32
/**
* Height of data block (2nd kernel)
*/
#define KER2_BLOCK_HEIGHT 16
/**
* LOG2 of width of data block (2nd kernel)
*/
#define KER2_BW_LOG2 5
/**
* LOG2 of height of data block (2nd kernel)
*/
#define KER2_BH_LOG2 4
/**
* Stride of shared memory buffer (2nd kernel)
*/
#define KER2_SMEMBLOCK_STRIDE (KER2_BLOCK_WIDTH+1)
/**
**************************************************************************
* Performs in-place DCT of vector of 8 elements.
*
* \param Vect0 [IN/OUT] - Pointer to the first element of vector
* \param Step  [IN] - Value to add to the pointer to access other elements
*
* \return None
*/
__device__ void CUDAsubroutineInplaceDCTvector(float *Vect0, int Step)
{
float *Vect1 = Vect0 + Step;
float *Vect2 = Vect1 + Step;
float *Vect3 = Vect2 + Step;
float *Vect4 = Vect3 + Step;
float *Vect5 = Vect4 + Step;
float *Vect6 = Vect5 + Step;
float *Vect7 = Vect6 + Step;
float X07P = (*Vect0) + (*Vect7);
float X16P = (*Vect1) + (*Vect6);
float X25P = (*Vect2) + (*Vect5);
float X34P = (*Vect3) + (*Vect4);
float X07M = (*Vect0) - (*Vect7);
float X61M = (*Vect6) - (*Vect1);
float X25M = (*Vect2) - (*Vect5);
float X43M = (*Vect4) - (*Vect3);
float X07P34PP = X07P + X34P;
float X07P34PM = X07P - X34P;
float X16P25PP = X16P + X25P;
float X16P25PM = X16P - X25P;
(*Vect0) = C_norm * (X07P34PP + X16P25PP);
(*Vect2) = C_norm * (C_b * X07P34PM + C_e * X16P25PM);
(*Vect4) = C_norm * (X07P34PP - X16P25PP);
(*Vect6) = C_norm * (C_e * X07P34PM - C_b * X16P25PM);
(*Vect1) = C_norm * (C_a * X07M - C_c * X61M + C_d * X25M - C_f * X43M);
(*Vect3) = C_norm * (C_c * X07M + C_f * X61M - C_a * X25M + C_d * X43M);
(*Vect5) = C_norm * (C_d * X07M + C_a * X61M + C_f * X25M - C_c * X43M);
(*Vect7) = C_norm * (C_f * X07M + C_d * X61M + C_c * X25M + C_a * X43M);
}
/**
**************************************************************************
* Performs in-place IDCT of vector of 8 elements.
*
* \param Vect0 [IN/OUT] - Pointer to the first element of vector
* \param Step  [IN] - Value to add to the pointer to access other elements
*
* \return None
*/
__device__ void CUDAsubroutineInplaceIDCTvector(float *Vect0, int Step)
{
float *Vect1 = Vect0 + Step;
float *Vect2 = Vect1 + Step;
float *Vect3 = Vect2 + Step;
float *Vect4 = Vect3 + Step;
float *Vect5 = Vect4 + Step;
float *Vect6 = Vect5 + Step;
float *Vect7 = Vect6 + Step;
float Y04P = (*Vect0) + (*Vect4);
float Y2b6eP = C_b * (*Vect2) + C_e * (*Vect6);
float Y04P2b6ePP = Y04P + Y2b6eP;
float Y04P2b6ePM = Y04P - Y2b6eP;
float Y7f1aP3c5dPP = C_f * (*Vect7) + C_a * (*Vect1) + C_c * (*Vect3) + C_d * (*Vect5);
float Y7a1fM3d5cMP = C_a * (*Vect7) - C_f * (*Vect1) + C_d * (*Vect3) - C_c * (*Vect5);
float Y04M = (*Vect0) - (*Vect4);
float Y2e6bM = C_e * (*Vect2) - C_b * (*Vect6);
float Y04M2e6bMP = Y04M + Y2e6bM;
float Y04M2e6bMM = Y04M - Y2e6bM;
float Y1c7dM3f5aPM = C_c * (*Vect1) - C_d * (*Vect7) - C_f * (*Vect3) - C_a * (*Vect5);
float Y1d7cP3a5fMM = C_d * (*Vect1) + C_c * (*Vect7) - C_a * (*Vect3) + C_f * (*Vect5);
(*Vect0) = C_norm * (Y04P2b6ePP + Y7f1aP3c5dPP);
(*Vect7) = C_norm * (Y04P2b6ePP - Y7f1aP3c5dPP);
(*Vect4) = C_norm * (Y04P2b6ePM + Y7a1fM3d5cMP);
(*Vect3) = C_norm * (Y04P2b6ePM - Y7a1fM3d5cMP);
(*Vect1) = C_norm * (Y04M2e6bMP + Y1c7dM3f5aPM);
(*Vect5) = C_norm * (Y04M2e6bMM - Y1d7cP3a5fMM);
(*Vect2) = C_norm * (Y04M2e6bMM + Y1d7cP3a5fMM);
(*Vect6) = C_norm * (Y04M2e6bMP - Y1c7dM3f5aPM);
}
/**
**************************************************************************
* Performs 8x8 block-wise Forward Discrete Cosine Transform of the given
* image plane and outputs result to the array of coefficients. 2nd implementation.
*  This kernel is designed to process the image in batches of 8x8 blocks that
*  utilize the maximum warp capacity, assuming that 8 threads per 8x8 block
*  are enough.
*
* \param dst [OUT] - Coefficients plane
* \param src [IN] - Source image plane
* \param ImgStride [IN] - Stride of dst and src
*
* \return None
*/
__global__ void CUDAkernel2DCT(float *dst, float *src, int ImgStride){
__shared__ float block[KER2_BLOCK_HEIGHT * KER2_SMEMBLOCK_STRIDE];
int OffsThreadInRow = threadIdx.y * BLOCK_SIZE + threadIdx.x;
int OffsThreadInCol = threadIdx.z * BLOCK_SIZE;
src += FMUL(blockIdx.y * KER2_BLOCK_HEIGHT + OffsThreadInCol, ImgStride) + blockIdx.x * KER2_BLOCK_WIDTH + OffsThreadInRow;
dst += FMUL(blockIdx.y * KER2_BLOCK_HEIGHT + OffsThreadInCol, ImgStride) + blockIdx.x * KER2_BLOCK_WIDTH + OffsThreadInRow;
float *bl_ptr = block + OffsThreadInCol * KER2_SMEMBLOCK_STRIDE + OffsThreadInRow;
#pragma unroll
for(unsigned int i = 0; i < BLOCK_SIZE; i++)
bl_ptr[i * KER2_SMEMBLOCK_STRIDE] = src[i * ImgStride];
//process rows
#ifdef __DEVICE_EMULATION__
__syncthreads();
#endif
CUDAsubroutineInplaceDCTvector(block + (OffsThreadInCol + threadIdx.x) * KER2_SMEMBLOCK_STRIDE + OffsThreadInRow - threadIdx.x, 1);
//process columns
#ifdef __DEVICE_EMULATION__
__syncthreads();
#endif
CUDAsubroutineInplaceDCTvector(bl_ptr, KER2_SMEMBLOCK_STRIDE);
for(unsigned int i = 0; i < BLOCK_SIZE; i++)
dst[i * ImgStride] = bl_ptr[i * KER2_SMEMBLOCK_STRIDE];
}
/**
**************************************************************************
* Performs 8x8 block-wise Inverse Discrete Cosine Transform of the given
* coefficients plane and outputs result to the image. 2nd implementation.
*  This kernel is designed to process the image in batches of 8x8 blocks that
*  utilize the maximum warp capacity, assuming that 8 threads per 8x8 block
*  are enough.
*
* \param dst [OUT] - Reconstructed image plane
* \param src [IN] - Coefficients plane
* \param ImgStride [IN] - Stride of dst and src
*
* \return None
*/
__global__ void CUDAkernel2IDCT(float *dst, float *src, int ImgStride){
__shared__ float block[KER2_BLOCK_HEIGHT * KER2_SMEMBLOCK_STRIDE];
int OffsThreadInRow = threadIdx.y * BLOCK_SIZE + threadIdx.x;
int OffsThreadInCol = threadIdx.z * BLOCK_SIZE;
src += FMUL(blockIdx.y * KER2_BLOCK_HEIGHT + OffsThreadInCol, ImgStride) + blockIdx.x * KER2_BLOCK_WIDTH + OffsThreadInRow;
dst += FMUL(blockIdx.y * KER2_BLOCK_HEIGHT + OffsThreadInCol, ImgStride) + blockIdx.x * KER2_BLOCK_WIDTH + OffsThreadInRow;
float *bl_ptr = block + OffsThreadInCol * KER2_SMEMBLOCK_STRIDE + OffsThreadInRow;
#pragma unroll
for(unsigned int i = 0; i < BLOCK_SIZE; i++)
bl_ptr[i * KER2_SMEMBLOCK_STRIDE] = src[i * ImgStride];
//process rows
#ifdef __DEVICE_EMULATION__
__syncthreads();
#endif
CUDAsubroutineInplaceIDCTvector(block + (OffsThreadInCol + threadIdx.x) * KER2_SMEMBLOCK_STRIDE + OffsThreadInRow - threadIdx.x, 1);
//process columns
#ifdef __DEVICE_EMULATION__
__syncthreads();
#endif
CUDAsubroutineInplaceIDCTvector(bl_ptr, KER2_SMEMBLOCK_STRIDE);
for(unsigned int i = 0; i < BLOCK_SIZE; i++)
dst[i * ImgStride] = bl_ptr[i * KER2_SMEMBLOCK_STRIDE];
}
| f1c9a997f5929dac4049dd20185be20ae4dca5d4.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
**************************************************************************
* \file dct8x8_kernel2.cu
* \brief Contains 2nd kernel implementations of DCT and IDCT routines, used in
* JPEG internal data processing. Optimized device code.
*
* This code implements traditional approach to forward and inverse Discrete
* Cosine Transform to blocks of image pixels (of 8x8 size), as in JPEG standard.
* The data processing is done using floating point representation.
* The routine that performs quantization of coefficients can be found in
* dct8x8_kernel_quantization.cu file.
*/
#pragma once
#include "Common.h"
#define C_a 1.387039845322148f //!< a = (2^0.5) * cos( pi / 16); Used in forward and inverse DCT.
#define C_b 1.306562964876377f //!< b = (2^0.5) * cos( pi / 8); Used in forward and inverse DCT.
#define C_c 1.175875602419359f //!< c = (2^0.5) * cos(3 * pi / 16); Used in forward and inverse DCT.
#define C_d 0.785694958387102f //!< d = (2^0.5) * cos(5 * pi / 16); Used in forward and inverse DCT.
#define C_e 0.541196100146197f //!< e = (2^0.5) * cos(3 * pi / 8); Used in forward and inverse DCT.
#define C_f 0.275899379282943f //!< f = (2^0.5) * cos(7 * pi / 16); Used in forward and inverse DCT.
/**
* Normalization constant that is used in forward and inverse DCT
*/
#define C_norm 0.3535533905932737f // 1 / (8^0.5)
/**
* Width of data block (2nd kernel)
*/
#define KER2_BLOCK_WIDTH 32
/**
* Height of data block (2nd kernel)
*/
#define KER2_BLOCK_HEIGHT 16
/**
* LOG2 of width of data block (2nd kernel)
*/
#define KER2_BW_LOG2 5
/**
* LOG2 of height of data block (2nd kernel)
*/
#define KER2_BH_LOG2 4
/**
* Stride of shared memory buffer (2nd kernel)
*/
#define KER2_SMEMBLOCK_STRIDE (KER2_BLOCK_WIDTH+1)
/**
**************************************************************************
* Performs in-place DCT of vector of 8 elements.
*
* \param Vect0 [IN/OUT] - Pointer to the first element of vector
* \param Step  [IN] - Value to add to the pointer to access other elements
*
* \return None
*/
__device__ void CUDAsubroutineInplaceDCTvector(float *Vect0, int Step)
{
float *Vect1 = Vect0 + Step;
float *Vect2 = Vect1 + Step;
float *Vect3 = Vect2 + Step;
float *Vect4 = Vect3 + Step;
float *Vect5 = Vect4 + Step;
float *Vect6 = Vect5 + Step;
float *Vect7 = Vect6 + Step;
float X07P = (*Vect0) + (*Vect7);
float X16P = (*Vect1) + (*Vect6);
float X25P = (*Vect2) + (*Vect5);
float X34P = (*Vect3) + (*Vect4);
float X07M = (*Vect0) - (*Vect7);
float X61M = (*Vect6) - (*Vect1);
float X25M = (*Vect2) - (*Vect5);
float X43M = (*Vect4) - (*Vect3);
float X07P34PP = X07P + X34P;
float X07P34PM = X07P - X34P;
float X16P25PP = X16P + X25P;
float X16P25PM = X16P - X25P;
(*Vect0) = C_norm * (X07P34PP + X16P25PP);
(*Vect2) = C_norm * (C_b * X07P34PM + C_e * X16P25PM);
(*Vect4) = C_norm * (X07P34PP - X16P25PP);
(*Vect6) = C_norm * (C_e * X07P34PM - C_b * X16P25PM);
(*Vect1) = C_norm * (C_a * X07M - C_c * X61M + C_d * X25M - C_f * X43M);
(*Vect3) = C_norm * (C_c * X07M + C_f * X61M - C_a * X25M + C_d * X43M);
(*Vect5) = C_norm * (C_d * X07M + C_a * X61M + C_f * X25M - C_c * X43M);
(*Vect7) = C_norm * (C_f * X07M + C_d * X61M + C_c * X25M + C_a * X43M);
}
/**
**************************************************************************
* Performs in-place IDCT of vector of 8 elements.
*
* \param Vect0 [IN/OUT] - Pointer to the first element of vector
* \param Step  [IN] - Value to add to the pointer to access other elements
*
* \return None
*/
__device__ void CUDAsubroutineInplaceIDCTvector(float *Vect0, int Step)
{
float *Vect1 = Vect0 + Step;
float *Vect2 = Vect1 + Step;
float *Vect3 = Vect2 + Step;
float *Vect4 = Vect3 + Step;
float *Vect5 = Vect4 + Step;
float *Vect6 = Vect5 + Step;
float *Vect7 = Vect6 + Step;
float Y04P = (*Vect0) + (*Vect4);
float Y2b6eP = C_b * (*Vect2) + C_e * (*Vect6);
float Y04P2b6ePP = Y04P + Y2b6eP;
float Y04P2b6ePM = Y04P - Y2b6eP;
float Y7f1aP3c5dPP = C_f * (*Vect7) + C_a * (*Vect1) + C_c * (*Vect3) + C_d * (*Vect5);
float Y7a1fM3d5cMP = C_a * (*Vect7) - C_f * (*Vect1) + C_d * (*Vect3) - C_c * (*Vect5);
float Y04M = (*Vect0) - (*Vect4);
float Y2e6bM = C_e * (*Vect2) - C_b * (*Vect6);
float Y04M2e6bMP = Y04M + Y2e6bM;
float Y04M2e6bMM = Y04M - Y2e6bM;
float Y1c7dM3f5aPM = C_c * (*Vect1) - C_d * (*Vect7) - C_f * (*Vect3) - C_a * (*Vect5);
float Y1d7cP3a5fMM = C_d * (*Vect1) + C_c * (*Vect7) - C_a * (*Vect3) + C_f * (*Vect5);
(*Vect0) = C_norm * (Y04P2b6ePP + Y7f1aP3c5dPP);
(*Vect7) = C_norm * (Y04P2b6ePP - Y7f1aP3c5dPP);
(*Vect4) = C_norm * (Y04P2b6ePM + Y7a1fM3d5cMP);
(*Vect3) = C_norm * (Y04P2b6ePM - Y7a1fM3d5cMP);
(*Vect1) = C_norm * (Y04M2e6bMP + Y1c7dM3f5aPM);
(*Vect5) = C_norm * (Y04M2e6bMM - Y1d7cP3a5fMM);
(*Vect2) = C_norm * (Y04M2e6bMM + Y1d7cP3a5fMM);
(*Vect6) = C_norm * (Y04M2e6bMP - Y1c7dM3f5aPM);
}
/**
**************************************************************************
* Performs 8x8 block-wise Forward Discrete Cosine Transform of the given
* image plane and outputs result to the array of coefficients. 2nd implementation.
*  This kernel is designed to process the image in batches of 8x8 blocks that
*  utilize the maximum warp capacity, assuming that 8 threads per 8x8 block
*  are enough.
*
* \param dst [OUT] - Coefficients plane
* \param src [IN] - Source image plane
* \param ImgStride [IN] - Stride of dst and src
*
* \return None
*/
__global__ void CUDAkernel2DCT(float *dst, float *src, int ImgStride){
__shared__ float block[KER2_BLOCK_HEIGHT * KER2_SMEMBLOCK_STRIDE];
int OffsThreadInRow = threadIdx.y * BLOCK_SIZE + threadIdx.x;
int OffsThreadInCol = threadIdx.z * BLOCK_SIZE;
src += FMUL(blockIdx.y * KER2_BLOCK_HEIGHT + OffsThreadInCol, ImgStride) + blockIdx.x * KER2_BLOCK_WIDTH + OffsThreadInRow;
dst += FMUL(blockIdx.y * KER2_BLOCK_HEIGHT + OffsThreadInCol, ImgStride) + blockIdx.x * KER2_BLOCK_WIDTH + OffsThreadInRow;
float *bl_ptr = block + OffsThreadInCol * KER2_SMEMBLOCK_STRIDE + OffsThreadInRow;
#pragma unroll
for(unsigned int i = 0; i < BLOCK_SIZE; i++)
bl_ptr[i * KER2_SMEMBLOCK_STRIDE] = src[i * ImgStride];
//process rows
#ifdef __DEVICE_EMULATION__
__syncthreads();
#endif
CUDAsubroutineInplaceDCTvector(block + (OffsThreadInCol + threadIdx.x) * KER2_SMEMBLOCK_STRIDE + OffsThreadInRow - threadIdx.x, 1);
//process columns
#ifdef __DEVICE_EMULATION__
__syncthreads();
#endif
CUDAsubroutineInplaceDCTvector(bl_ptr, KER2_SMEMBLOCK_STRIDE);
for(unsigned int i = 0; i < BLOCK_SIZE; i++)
dst[i * ImgStride] = bl_ptr[i * KER2_SMEMBLOCK_STRIDE];
}
/**
**************************************************************************
* Performs 8x8 block-wise Inverse Discrete Cosine Transform of the given
* coefficients plane and outputs result to the image. 2nd implementation.
*  This kernel is designed to process the image in batches of 8x8 blocks that
*  utilize the maximum warp capacity, assuming that 8 threads per 8x8 block
*  are enough.
*
* \param dst [OUT] - Reconstructed image plane
* \param src [IN] - Coefficients plane
* \param ImgStride [IN] - Stride of dst and src
*
* \return None
*/
__global__ void CUDAkernel2IDCT(float *dst, float *src, int ImgStride){
__shared__ float block[KER2_BLOCK_HEIGHT * KER2_SMEMBLOCK_STRIDE];
int OffsThreadInRow = threadIdx.y * BLOCK_SIZE + threadIdx.x;
int OffsThreadInCol = threadIdx.z * BLOCK_SIZE;
src += FMUL(blockIdx.y * KER2_BLOCK_HEIGHT + OffsThreadInCol, ImgStride) + blockIdx.x * KER2_BLOCK_WIDTH + OffsThreadInRow;
dst += FMUL(blockIdx.y * KER2_BLOCK_HEIGHT + OffsThreadInCol, ImgStride) + blockIdx.x * KER2_BLOCK_WIDTH + OffsThreadInRow;
float *bl_ptr = block + OffsThreadInCol * KER2_SMEMBLOCK_STRIDE + OffsThreadInRow;
#pragma unroll
for(unsigned int i = 0; i < BLOCK_SIZE; i++)
bl_ptr[i * KER2_SMEMBLOCK_STRIDE] = src[i * ImgStride];
//process rows
#ifdef __DEVICE_EMULATION__
__syncthreads();
#endif
CUDAsubroutineInplaceIDCTvector(block + (OffsThreadInCol + threadIdx.x) * KER2_SMEMBLOCK_STRIDE + OffsThreadInRow - threadIdx.x, 1);
//process columns
#ifdef __DEVICE_EMULATION__
__syncthreads();
#endif
CUDAsubroutineInplaceIDCTvector(bl_ptr, KER2_SMEMBLOCK_STRIDE);
for(unsigned int i = 0; i < BLOCK_SIZE; i++)
dst[i * ImgStride] = bl_ptr[i * KER2_SMEMBLOCK_STRIDE];
}
|
a98c6e6cae3b3cd8c632f8ac2968675447abff1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/pool_grad_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/pooling.h"
#include "paddle/phi/kernels/funcs/sparse/convolution.h"
namespace phi {
namespace sparse {
template <typename T, typename IntT = int>
__global__ void MaxPoolGradCudaKernel(const T* in_features_ptr,
const T* out_features_ptr,
const T* out_grad_ptr,
const IntT* rulebook_ptr,
const int n,
const int rulebook_len,
const int channels,
T* x_grad_ptr) {
phi::funcs::MaxPoolGrad<T> grad_functor;
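  // One iteration per (rulebook entry, channel) pair; the rulebook maps it to input/output rows.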
CUDA_KERNEL_LOOP_TYPE(i, n * channels, int64_t) {
int real_i = i / channels;
int c = i - real_i * channels;
IntT in_i = rulebook_ptr[real_i];
IntT out_i = rulebook_ptr[real_i + rulebook_len];
grad_functor.compute(in_features_ptr[in_i * channels + c],
out_features_ptr[out_i * channels + c],
out_grad_ptr[out_i * channels + c],
1,
&x_grad_ptr[in_i * channels + c]);
}
}
template <typename T, typename IntT = int>
void MaxPoolCooGradGPUKernel(const GPUContext& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& rulebook,
const DenseTensor& counter,
const SparseCooTensor& out,
const SparseCooTensor& out_grad,
const std::vector<int>& kernel_sizes,
SparseCooTensor* x_grad) {
int kernel_size = kernel_sizes[0] * kernel_sizes[1] * kernel_sizes[2];
const int in_channels = x.dims()[4];
int rulebook_len = rulebook.dims()[1];
const IntT* rulebook_ptr = rulebook.data<IntT>();
std::vector<int> offsets(kernel_size + 1);
const int* counter_ptr = counter.data<int>();
phi::funcs::sparse::PrefixSum(counter_ptr, &offsets[0], kernel_size);
const T* in_features_ptr = x.values().data<T>();
const T* out_features_ptr = out.values().data<T>();
const T* out_grad_ptr = out_grad.values().data<T>();
// TODO(zhangkaihuo): call phi::sparse::EmptyLike
DenseTensor x_grad_indices = phi::EmptyLike<IntT>(dev_ctx, x.indices());
DenseTensor x_grad_values = phi::EmptyLike<T>(dev_ctx, x.values());
x_grad->SetMember(x_grad_indices, x_grad_values, x.dims(), true);
T* x_grad_ptr = x_grad_values.data<T>();
phi::funcs::SetConstant<GPUContext, T> set_zero;
set_zero(dev_ctx, &x_grad_values, static_cast<T>(0.0f));
phi::Copy<GPUContext>(
dev_ctx, x.indices(), dev_ctx.GetPlace(), false, &x_grad_indices);
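  // Launch one scatter kernel per kernel offset: offsets[i] marks the start of
  // the rulebook segment for offset i and counter_ptr[i] gives its length;
  // offsets with no non-zeros are skipped.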
for (int i = 0; i < kernel_size; i++) {
if (counter_ptr[i] <= 0) {
continue;
}
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(
dev_ctx, counter_ptr[i] * in_channels, 1);
hipLaunchKernelGGL(( MaxPoolGradCudaKernel<T, IntT>)
, dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
dev_ctx.stream(), in_features_ptr,
out_features_ptr,
out_grad_ptr,
rulebook_ptr + offsets[i],
counter_ptr[i],
rulebook_len,
in_channels,
x_grad_ptr);
}
}
template <typename T, typename Context>
void MaxPoolCooGradKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& rulebook,
const DenseTensor& counter,
const SparseCooTensor& out,
const SparseCooTensor& out_grad,
const std::vector<int>& kernel_sizes,
SparseCooTensor* x_grad) {
PD_VISIT_BASE_INTEGRAL_TYPES(
x.indices().dtype(), "MaxPoolCooGradGPUKernel", ([&] {
MaxPoolCooGradGPUKernel<T, data_t>(
dev_ctx, x, rulebook, counter, out, out_grad, kernel_sizes, x_grad);
}));
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(maxpool_coo_grad,
GPU,
ALL_LAYOUT,
phi::sparse::MaxPoolCooGradKernel,
float,
double) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
| a98c6e6cae3b3cd8c632f8ac2968675447abff1e.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/pool_grad_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/pooling.h"
#include "paddle/phi/kernels/funcs/sparse/convolution.h"
namespace phi {
namespace sparse {
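// Scatters the pooled gradient back to the sparse input values. The rulebook
// stores the input non-zero index of each pair in its first row and the
// matching output index in its second row (offset by rulebook_len);
// MaxPoolGrad propagates out_grad only to the input element that produced the
// pooled maximum. The grid-stride loop covers one (entry, channel) element
// per iteration.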
template <typename T, typename IntT = int>
__global__ void MaxPoolGradCudaKernel(const T* in_features_ptr,
const T* out_features_ptr,
const T* out_grad_ptr,
const IntT* rulebook_ptr,
const int n,
const int rulebook_len,
const int channels,
T* x_grad_ptr) {
phi::funcs::MaxPoolGrad<T> grad_functor;
CUDA_KERNEL_LOOP_TYPE(i, n * channels, int64_t) {
int real_i = i / channels;
int c = i - real_i * channels;
IntT in_i = rulebook_ptr[real_i];
IntT out_i = rulebook_ptr[real_i + rulebook_len];
grad_functor.compute(in_features_ptr[in_i * channels + c],
out_features_ptr[out_i * channels + c],
out_grad_ptr[out_i * channels + c],
1,
&x_grad_ptr[in_i * channels + c]);
}
}
template <typename T, typename IntT = int>
void MaxPoolCooGradGPUKernel(const GPUContext& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& rulebook,
const DenseTensor& counter,
const SparseCooTensor& out,
const SparseCooTensor& out_grad,
const std::vector<int>& kernel_sizes,
SparseCooTensor* x_grad) {
int kernel_size = kernel_sizes[0] * kernel_sizes[1] * kernel_sizes[2];
const int in_channels = x.dims()[4];
int rulebook_len = rulebook.dims()[1];
const IntT* rulebook_ptr = rulebook.data<IntT>();
std::vector<int> offsets(kernel_size + 1);
const int* counter_ptr = counter.data<int>();
phi::funcs::sparse::PrefixSum(counter_ptr, &offsets[0], kernel_size);
const T* in_features_ptr = x.values().data<T>();
const T* out_features_ptr = out.values().data<T>();
const T* out_grad_ptr = out_grad.values().data<T>();
// TODO(zhangkaihuo): call phi::sparse::EmptyLike
DenseTensor x_grad_indices = phi::EmptyLike<IntT>(dev_ctx, x.indices());
DenseTensor x_grad_values = phi::EmptyLike<T>(dev_ctx, x.values());
x_grad->SetMember(x_grad_indices, x_grad_values, x.dims(), true);
T* x_grad_ptr = x_grad_values.data<T>();
phi::funcs::SetConstant<GPUContext, T> set_zero;
set_zero(dev_ctx, &x_grad_values, static_cast<T>(0.0f));
phi::Copy<GPUContext>(
dev_ctx, x.indices(), dev_ctx.GetPlace(), false, &x_grad_indices);
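  // Launch one scatter kernel per kernel offset: offsets[i] marks the start of
  // the rulebook segment for offset i and counter_ptr[i] gives its length;
  // offsets with no non-zeros are skipped.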
for (int i = 0; i < kernel_size; i++) {
if (counter_ptr[i] <= 0) {
continue;
}
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(
dev_ctx, counter_ptr[i] * in_channels, 1);
MaxPoolGradCudaKernel<T, IntT>
<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
dev_ctx.stream()>>>(in_features_ptr,
out_features_ptr,
out_grad_ptr,
rulebook_ptr + offsets[i],
counter_ptr[i],
rulebook_len,
in_channels,
x_grad_ptr);
}
}
template <typename T, typename Context>
void MaxPoolCooGradKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& rulebook,
const DenseTensor& counter,
const SparseCooTensor& out,
const SparseCooTensor& out_grad,
const std::vector<int>& kernel_sizes,
SparseCooTensor* x_grad) {
PD_VISIT_BASE_INTEGRAL_TYPES(
x.indices().dtype(), "MaxPoolCooGradGPUKernel", ([&] {
MaxPoolCooGradGPUKernel<T, data_t>(
dev_ctx, x, rulebook, counter, out, out_grad, kernel_sizes, x_grad);
}));
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(maxpool_coo_grad,
GPU,
ALL_LAYOUT,
phi::sparse::MaxPoolCooGradKernel,
float,
double) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
|
1af09de157655312261c5df3a4a7fa9cddb8fa95.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h> // Reference???
const int N = 1024; const int blocksize = 16;
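// The kernel fills c with the entries of the symmetric orthonormal DST-I matrix,
// C(i,j) = sqrt(2/(N+1)) * sin((i+1)*(j+1)*pi/(N+1)); the a and b inputs are
// copied to the device by main() but are not read by the kernel.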
__global__ void add_matrix( float *a, float *b, float *c, int N, float rf, float pirkplus1) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = i + j*N;
if ( i < N && j < N )
c[index] = rf*__sinf((float)(i+1)*(float)(j+1)*pirkplus1);
}
int main(void){
float *a = new float[N*N]; float *b = new float[N*N]; float *c = new float[N*N];
float two=2.0f, one=1.0f;
float pi,rkplus1,rf; // Generate square orthonormal matrices
pi = two * asin(one);
rkplus1 = one/(float(N) + one);
rf = sqrt(two*rkplus1);
for ( int i = 0; i < N*N; ++i ) {
a[i] = 1.0f; b[i] = 3.5f; }
float *ad, *bd, *cd;
const int size = N*N*sizeof(float);
hipMalloc( (void**)&ad, size );
hipMalloc( (void**)&bd, size );
hipMalloc( (void**)&cd, size );
hipMemcpy( ad, a, size, hipMemcpyHostToDevice ); // COPY DATA TO GPU
hipMemcpy( bd, b, size, hipMemcpyHostToDevice );
dim3 dimBlock( blocksize, blocksize );
dim3 dimGrid( N/dimBlock.x, N/dimBlock.y );
hipLaunchKernelGGL(( add_matrix), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd, cd, N, rf, pi*rkplus1 );
hipMemcpy( c, cd, size, hipMemcpyDeviceToHost );
for (int i = 0; i < 10;i++) {
printf(" %7.5f", c[i]);
}
printf("\n");
hipFree( ad ); hipFree( bd ); hipFree( cd ); // CLEAN UP, RETURN
return 0;
}
| 1af09de157655312261c5df3a4a7fa9cddb8fa95.cu | #include <stdio.h> // Reference???
const int N = 1024; const int blocksize = 16;
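// The kernel fills c with the entries of the symmetric orthonormal DST-I matrix,
// C(i,j) = sqrt(2/(N+1)) * sin((i+1)*(j+1)*pi/(N+1)); the a and b inputs are
// copied to the device by main() but are not read by the kernel.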
__global__ void add_matrix( float *a, float *b, float *c, int N, float rf, float pirkplus1) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = i + j*N;
if ( i < N && j < N )
c[index] = rf*__sinf((float)(i+1)*(float)(j+1)*pirkplus1);
}
int main(void){
float *a = new float[N*N]; float *b = new float[N*N]; float *c = new float[N*N];
float two=2.0f, one=1.0f;
float pi,rkplus1,rf; // Generate square orthonormal matrices
pi = two * asin(one);
rkplus1 = one/(float(N) + one);
rf = sqrt(two*rkplus1);
for ( int i = 0; i < N*N; ++i ) {
a[i] = 1.0f; b[i] = 3.5f; }
float *ad, *bd, *cd;
const int size = N*N*sizeof(float);
cudaMalloc( (void**)&ad, size );
cudaMalloc( (void**)&bd, size );
cudaMalloc( (void**)&cd, size );
cudaMemcpy( ad, a, size, cudaMemcpyHostToDevice ); // COPY DATA TO GPU
cudaMemcpy( bd, b, size, cudaMemcpyHostToDevice );
dim3 dimBlock( blocksize, blocksize );
dim3 dimGrid( N/dimBlock.x, N/dimBlock.y );
add_matrix<<<dimGrid, dimBlock>>>( ad, bd, cd, N, rf, pi*rkplus1 );
cudaMemcpy( c, cd, size, cudaMemcpyDeviceToHost );
for (int i = 0; i < 10;i++) {
printf(" %7.5f", c[i]);
}
printf("\n");
cudaFree( ad ); cudaFree( bd ); cudaFree( cd ); // CLEAN UP, RETURN
return 0;
}
|
bottomBoundaryKernel.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void bottomBoundaryKernel(double* temperature, int block_size) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < block_size) {
temperature[(block_size + 2) * (block_size + 1) + (1 + i)] = 1.0;
}
} | bottomBoundaryKernel.cu | #include "includes.h"
__global__ void bottomBoundaryKernel(double* temperature, int block_size) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < block_size) {
temperature[(block_size + 2) * (block_size + 1) + (1 + i)] = 1.0;
}
} |
3e388c3b158e61fff0a4cbfb7f313ee27988e569.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
* See COPYRIGHT.txt for license information
*/
#include <stdio.h>
#include "nvshmem.h"
#include "nvshmemx.h"
#ifdef ENABLE_MPI_SUPPORT
#include "mpi.h"
#endif
#undef CUDA_CHECK
#define CUDA_CHECK(stmt) \
do { \
hipError_t result = (stmt); \
if (hipSuccess != result) { \
fprintf(stderr, "[%s:%d] cuda failed with %s \n", __FILE__, __LINE__, \
hipGetErrorString(result)); \
exit(-1); \
} \
} while (0)
#define NVSHMEM_CHECK(stmt) \
do { \
int result = (stmt); \
if (NVSHMEMX_SUCCESS != result) { \
fprintf(stderr, "[%s:%d] nvshmem failed with error %d \n", __FILE__, __LINE__, \
result); \
exit(-1); \
} \
} while (0)
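// Naive ring reduction over PE ranks: in each of the npes-1 rounds every PE
// writes its running value to the next PE's `target`, waits on the barrier,
// then adds its own rank to the value it received; afterwards every PE holds
// 0 + 1 + ... + (npes-1) in lvalue.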
__global__ void reduce_ring(int *target, int mype, int npes) {
int peer = (mype + 1) % npes;
int lvalue = mype;
for (int i = 1; i < npes; i++) {
nvshmem_int_p(target, lvalue, peer);
nvshmem_barrier_all();
lvalue = *target + mype;
}
}
int main(int c, char *v[]) {
int mype, npes;
#ifdef ENABLE_MPI_SUPPORT
bool use_mpi = false;
char *value = getenv("NVSHMEMTEST_USE_MPI_LAUNCHER");
if (value) use_mpi = atoi(value);
#endif
#ifdef ENABLE_MPI_SUPPORT
if (use_mpi) {
MPI_Init(&c, &v);
int rank, nranks;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &nranks);
MPI_Comm mpi_comm = MPI_COMM_WORLD;
nvshmemx_init_attr_t attr;
attr.mpi_comm = &mpi_comm;
nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr);
} else
nvshmem_init();
#else
nvshmem_init();
#endif
mype = nvshmem_my_pe();
npes = nvshmem_n_pes();
// application picks the device each PE will use
CUDA_CHECK(hipSetDevice(mype));
double *u = (double *)nvshmem_malloc(sizeof(double));
void *args[] = {&u, &mype, &npes};
dim3 dimBlock(1);
dim3 dimGrid(1);
NVSHMEM_CHECK(
nvshmemx_collective_launch((const void *)reduce_ring, dimGrid, dimBlock, args, 0, 0));
CUDA_CHECK(hipDeviceSynchronize());
printf("[%d of %d] run complete \n", mype, npes);
nvshmem_free(u);
nvshmem_finalize();
#ifdef ENABLE_MPI_SUPPORT
if (use_mpi) MPI_Finalize();
#endif
return 0;
}
| 3e388c3b158e61fff0a4cbfb7f313ee27988e569.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
* See COPYRIGHT.txt for license information
*/
#include <stdio.h>
#include "nvshmem.h"
#include "nvshmemx.h"
#ifdef ENABLE_MPI_SUPPORT
#include "mpi.h"
#endif
#undef CUDA_CHECK
#define CUDA_CHECK(stmt) \
do { \
cudaError_t result = (stmt); \
if (cudaSuccess != result) { \
fprintf(stderr, "[%s:%d] cuda failed with %s \n", __FILE__, __LINE__, \
cudaGetErrorString(result)); \
exit(-1); \
} \
} while (0)
#define NVSHMEM_CHECK(stmt) \
do { \
int result = (stmt); \
if (NVSHMEMX_SUCCESS != result) { \
fprintf(stderr, "[%s:%d] nvshmem failed with error %d \n", __FILE__, __LINE__, \
result); \
exit(-1); \
} \
} while (0)
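// Naive ring reduction over PE ranks: in each of the npes-1 rounds every PE
// writes its running value to the next PE's `target`, waits on the barrier,
// then adds its own rank to the value it received; afterwards every PE holds
// 0 + 1 + ... + (npes-1) in lvalue.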
__global__ void reduce_ring(int *target, int mype, int npes) {
int peer = (mype + 1) % npes;
int lvalue = mype;
for (int i = 1; i < npes; i++) {
nvshmem_int_p(target, lvalue, peer);
nvshmem_barrier_all();
lvalue = *target + mype;
}
}
int main(int c, char *v[]) {
int mype, npes;
#ifdef ENABLE_MPI_SUPPORT
bool use_mpi = false;
char *value = getenv("NVSHMEMTEST_USE_MPI_LAUNCHER");
if (value) use_mpi = atoi(value);
#endif
#ifdef ENABLE_MPI_SUPPORT
if (use_mpi) {
MPI_Init(&c, &v);
int rank, nranks;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &nranks);
MPI_Comm mpi_comm = MPI_COMM_WORLD;
nvshmemx_init_attr_t attr;
attr.mpi_comm = &mpi_comm;
nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr);
} else
nvshmem_init();
#else
nvshmem_init();
#endif
mype = nvshmem_my_pe();
npes = nvshmem_n_pes();
// application picks the device each PE will use
CUDA_CHECK(cudaSetDevice(mype));
double *u = (double *)nvshmem_malloc(sizeof(double));
void *args[] = {&u, &mype, &npes};
dim3 dimBlock(1);
dim3 dimGrid(1);
NVSHMEM_CHECK(
nvshmemx_collective_launch((const void *)reduce_ring, dimGrid, dimBlock, args, 0, 0));
CUDA_CHECK(cudaDeviceSynchronize());
printf("[%d of %d] run complete \n", mype, npes);
nvshmem_free(u);
nvshmem_finalize();
#ifdef ENABLE_MPI_SUPPORT
if (use_mpi) MPI_Finalize();
#endif
return 0;
}
|
a591fb7ee24279b7155bd8e5f53e8b768f9aa8b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Groute: An Asynchronous Multi-GPU Programming Framework
// http://www.github.com/groute/groute
// Copyright (c) 2017, A. Barak
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the names of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>
#include <gflags/gflags.h>
#include <groute/event_pool.h>
#include <groute/fused_distributed_worklist.h>
#include <groute/cta_work.h>
#include <groute/graphs/csr_graph.h>
#include <groute/graphs/traversal_algo.h>
#include <groute/graphs/fused_solver.h>
#include <utils/parser.h>
#include <utils/utils.h>
#include <utils/stopwatch.h>
#include "sssp_common.h"
DECLARE_int32(source_node);
namespace sssp {
namespace opt {
const distance_t INF = UINT_MAX;
struct DistanceData
{
index_t node;
distance_t distance;
__device__ __host__ __forceinline__ DistanceData(index_t node, distance_t distance) : node(node), distance(distance) { }
__device__ __host__ __forceinline__ DistanceData() : node(INF), distance(INF) { }
};
typedef index_t local_work_t;
typedef DistanceData remote_work_t;
__global__ void SSSPInit(distance_t* distances, int nnodes)
{
int tid = GTID;
if (tid < nnodes)
{
distances[tid] = INF;
}
}
template<
typename TGraph,
typename TWeightDatum, typename TDistanceDatum>
struct SSSPWorkNP
{
template<typename WorkSource>
__device__ static void work(
const WorkSource& work_source,
groute::dev::CircularWorklist<local_work_t>& rwl_in,
groute::dev::CircularWorklist<remote_work_t>& rwl_out,
const TGraph& graph,
TWeightDatum& edge_weights, TDistanceDatum& node_distances
)
{
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads)
{
groute::dev::np_local<distance_t> np_local = { 0, 0, 0 };
if (i < work_size)
{
index_t node = work_source.get_work(i);
np_local.start = graph.begin_edge(node);
np_local.size = graph.end_edge(node) - np_local.start;
np_local.meta_data = node_distances.get_item(node);
}
groute::dev::CTAWorkScheduler<distance_t>::template schedule(
np_local,
[&graph, &edge_weights, &node_distances, &rwl_in, &rwl_out](index_t edge, distance_t distance)
{
index_t dest = graph.edge_dest(edge);
distance_t weight = edge_weights.get_item(edge);
if (distance + weight < atomicMin(node_distances.get_item_ptr(dest), distance + weight))
{
int is_owned = graph.owns(dest);
// TODO: move ballot logic to a device structure
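                    // Warp-aggregated enqueue: the ballots split the active lanes into
                    // locally-owned and remote destinations, each group elects a leader via
                    // __ffs, and __popc gives every lane its offset so the whole group
                    // reserves worklist space with a single prepend/append.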
int owned_mask = __ballot(is_owned ? 1 : 0);
int remote_mask = __ballot(is_owned ? 0 : 1);
if (is_owned)
{
int high_leader = __ffs(owned_mask) - 1;
int thread_offset = __popc(owned_mask & ((1 << lane_id()) - 1));
rwl_in.prepend_warp(dest, high_leader, __popc(owned_mask), thread_offset);
}
else
{
int low_leader = __ffs(remote_mask) - 1;
int thread_offset = __popc(remote_mask & ((1 << lane_id()) - 1));
rwl_out.append_warp(DistanceData(dest, distance + weight), low_leader, __popc(remote_mask), thread_offset);
}
}
}
);
}
}
};
template<
typename TGraph,
typename TWeightDatum, typename TDistanceDatum>
struct SSSPWork
{
template<typename WorkSource>
__device__ static void work(
const WorkSource& work_source,
groute::dev::CircularWorklist<local_work_t>& rwl_in,
groute::dev::CircularWorklist<remote_work_t>& rwl_out,
const TGraph& graph,
TWeightDatum& edge_weights, TDistanceDatum& node_distances
)
{
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
for (uint32_t i = 0 + tid; i < work_size; i += nthreads)
{
index_t node = work_source.get_work(i);
distance_t distance = node_distances.get_item(node);
for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge)
{
index_t dest = graph.edge_dest(edge);
distance_t weight = edge_weights.get_item(edge);
if (distance + weight < atomicMin(node_distances.get_item_ptr(dest), distance + weight))
{
int is_owned = graph.owns(dest);
// TODO: move ballot logic to a device structure
int owned_mask = __ballot(is_owned ? 1 : 0);
int remote_mask = __ballot(is_owned ? 0 : 1);
if (is_owned)
{
int high_leader = __ffs(owned_mask) - 1;
int thread_offset = __popc(owned_mask & ((1 << lane_id()) - 1));
rwl_in.prepend_warp(dest, high_leader, __popc(owned_mask), thread_offset);
}
else
{
int low_leader = __ffs(remote_mask) - 1;
int thread_offset = __popc(remote_mask & ((1 << lane_id()) - 1));
rwl_out.append_warp(DistanceData(dest, distance + weight), low_leader, __popc(remote_mask), thread_offset);
}
}
}
}
}
};
struct SplitOps
{
private:
groute::graphs::dev::CSRGraphSeg m_graph_seg;
groute::graphs::dev::GraphDatum<distance_t> m_distances_datum;
public:
template<typename...UnusedData>
SplitOps(
const groute::graphs::dev::CSRGraphSeg& graph_seg,
const groute::graphs::dev::GraphDatumSeg<distance_t>& weights_datum,
const groute::graphs::dev::GraphDatum<distance_t>& distances_datum,
UnusedData&... data)
: m_graph_seg(graph_seg), m_distances_datum(distances_datum)
{
}
__device__ __forceinline__ groute::opt::SplitFlags on_receive(const remote_work_t& work)
{
if (m_graph_seg.owns(work.node))
{
return (work.distance < atomicMin(m_distances_datum.get_item_ptr(work.node), work.distance))
? groute::opt::SF_Take
: groute::opt::SF_None; // filter
}
return groute::opt::SF_Pass;
}
__device__ __forceinline__ bool is_high_prio(const local_work_t& work, const distance_t& global_prio)
{
return m_distances_datum[work] <= global_prio;
}
__device__ __forceinline__ groute::opt::SplitFlags on_send(local_work_t work)
{
return (m_graph_seg.owns(work))
? groute::opt::SF_Take
: groute::opt::SF_Pass;
}
__device__ __forceinline__ remote_work_t pack(local_work_t work)
{
return DistanceData(work, m_distances_datum.get_item(work));
}
__device__ __forceinline__ local_work_t unpack(const remote_work_t& work)
{
return work.node;
}
};
template<
typename TGraph,
template <typename> class TWeightDatum, template <typename> class TDistanceDatum
>
struct FusedProblem
{
TGraph m_graph;
TWeightDatum<distance_t> m_weights_datum;
TDistanceDatum<distance_t> m_distances_datum;
typedef SSSPWork<TGraph, TWeightDatum<distance_t>, TDistanceDatum<distance_t>> WorkType;
typedef SSSPWorkNP<TGraph, TWeightDatum<distance_t>, TDistanceDatum<distance_t>> WorkTypeNP;
public:
FusedProblem(const TGraph& graph, const TWeightDatum<distance_t>& weights_datum, const TDistanceDatum<distance_t>& distances_datum) :
m_graph(graph), m_weights_datum(weights_datum), m_distances_datum(distances_datum)
{
}
// Initial init. Called before a global CPU+GPU barrier
void Init(groute::Stream& stream) const
{
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, m_distances_datum.size);
SSSPInit << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
m_distances_datum.data_ptr, m_distances_datum.size);
}
bool DoFusedInit(groute::Worklist<local_work_t>* lwl_high, groute::Worklist<local_work_t>* lwl_low,
groute::CircularWorklist<local_work_t>* rwl_in, groute::CircularWorklist<remote_work_t>* rwl_out,
int fused_chunk_size, distance_t global_prio,
volatile int *high_work_counter, volatile int *low_work_counter,
uint32_t *kernel_internal_counter, volatile int *send_signal_ptr,
cub::GridBarrierLifetime& barrier_lifetime,
dim3 grid_dims, dim3 block_dims, groute::Stream& stream)
{
return false; // no work was done here
}
void DoFusedWork(groute::Worklist<local_work_t>* lwl_high, groute::Worklist<local_work_t>* lwl_low,
groute::CircularWorklist<local_work_t>* rwl_in, groute::CircularWorklist<remote_work_t>* rwl_out,
int fused_chunk_size, distance_t global_prio,
volatile int *high_work_counter, volatile int *low_work_counter,
uint32_t *kernel_internal_counter, volatile int *send_signal_ptr,
cub::GridBarrierLifetime& barrier_lifetime,
dim3 grid_dims, dim3 block_dims, groute::Stream& stream)
{
if (FLAGS_iteration_fusion)
{
if (FLAGS_cta_np)
{
groute::FusedWork <
groute::NeverStop, local_work_t, remote_work_t, distance_t, SplitOps,
WorkTypeNP,
TGraph, TWeightDatum<distance_t>, TDistanceDatum<distance_t> >
<< < grid_dims, block_dims, 0, stream.cuda_stream >> > (
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
sssp::opt::SplitOps(m_graph, m_weights_datum, m_distances_datum),
m_graph, m_weights_datum, m_distances_datum
);
}
else
{
groute::FusedWork <
groute::NeverStop, local_work_t, remote_work_t, distance_t, SplitOps,
WorkType,
TGraph, TWeightDatum<distance_t>, TDistanceDatum<distance_t> >
<< < grid_dims, block_dims, 0, stream.cuda_stream >> > (
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
sssp::opt::SplitOps(m_graph, m_weights_datum, m_distances_datum),
m_graph, m_weights_datum, m_distances_datum
);
}
}
else
{
if (FLAGS_cta_np)
{
groute::FusedWork <
groute::RunNTimes<1>, local_work_t, remote_work_t, distance_t, SplitOps,
WorkTypeNP,
TGraph, TWeightDatum<distance_t>, TDistanceDatum<distance_t> >
<< < grid_dims, block_dims, 0, stream.cuda_stream >> > (
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
sssp::opt::SplitOps(m_graph, m_weights_datum, m_distances_datum),
m_graph, m_weights_datum, m_distances_datum
);
}
else
{
groute::FusedWork <
groute::RunNTimes<1>, local_work_t, remote_work_t, distance_t, SplitOps,
WorkType,
TGraph, TWeightDatum<distance_t>, TDistanceDatum<distance_t> >
<< < grid_dims, block_dims, 0, stream.cuda_stream >> > (
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
sssp::opt::SplitOps(m_graph, m_weights_datum, m_distances_datum),
m_graph, m_weights_datum, m_distances_datum
);
}
}
}
};
struct Algo
{
static const char* NameLower() { return "sssp"; }
static const char* Name() { return "SSSP"; }
static void Init(
groute::graphs::traversal::Context<sssp::opt::Algo>& context,
groute::graphs::multi::CSRGraphAllocator& graph_manager,
groute::router::Router<remote_work_t>& worklist_router,
groute::opt::DistributedWorklist<local_work_t, remote_work_t, SplitOps>& distributed_worklist)
{
index_t source_node = min(max(0, FLAGS_source_node), context.host_graph.nnodes - 1);
auto partitioner = graph_manager.GetGraphPartitioner();
if (partitioner->NeedsReverseLookup())
{
source_node = partitioner->GetReverseLookupFunc()(source_node);
}
// report the initial work
distributed_worklist.ReportHighPrioWork(1, 0, "Host", groute::Device::Host, true);
std::vector<remote_work_t> initial_work;
initial_work.push_back(remote_work_t(source_node, 0));
groute::router::ISender<remote_work_t>* work_sender = worklist_router.GetSender(groute::Device::Host);
work_sender->Send(
groute::Segment<remote_work_t>(&initial_work[0], 1), groute::Event());
work_sender->Shutdown();
}
template<
typename TGraphAllocator,
template <typename> class TWeightDatum, template <typename> class TDistanceDatum, typename...UnusedData>
static std::vector<distance_t> Gather(
TGraphAllocator& graph_allocator,
TWeightDatum<distance_t>& weights_datum, TDistanceDatum<distance_t>& distances_datum,
UnusedData&... data)
{
graph_allocator.GatherDatum(distances_datum);
return distances_datum.GetHostData();
}
template<
template <typename> class TWeightDatum,
template <typename> class TDistanceDatum,
typename...UnusedData>
static std::vector<distance_t> Host(
groute::graphs::host::CSRGraph& graph,
TWeightDatum<distance_t>& weights_datum, TDistanceDatum<distance_t>& distances_datum,
UnusedData&... data)
{
return SSSPHostNaive(graph, weights_datum.GetHostDataPtr(), min(max(0, FLAGS_source_node), graph.nnodes - 1));
}
static int Output(const char *file, const std::vector<distance_t>& distances)
{
return SSSPOutput(file, distances);
}
static int CheckErrors(const std::vector<distance_t>& distances, const std::vector<distance_t>& regression)
{
return SSSPCheckErrors(distances, regression);
}
};
}
}
bool TestSSSPAsyncMultiOptimized(int ngpus)
{
typedef sssp::opt::FusedProblem<groute::graphs::dev::CSRGraphSeg, groute::graphs::dev::GraphDatumSeg, groute::graphs::dev::GraphDatum> ProblemType;
typedef groute::graphs::traversal::FusedSolver<
sssp::opt::Algo, ProblemType,
sssp::opt::local_work_t , sssp::opt::remote_work_t, distance_t,
sssp::opt::SplitOps,
groute::graphs::dev::CSRGraphSeg, groute::graphs::dev::GraphDatumSeg<distance_t>, groute::graphs::dev::GraphDatum<distance_t>> SolverType;
groute::graphs::traversal::__MultiRunner__Opt__ <
sssp::opt::Algo,
ProblemType,
SolverType,
sssp::opt::SplitOps,
sssp::opt::local_work_t,
sssp::opt::remote_work_t,
groute::graphs::multi::EdgeInputDatum<distance_t>,
groute::graphs::multi::NodeOutputGlobalDatum<distance_t> > runner;
groute::graphs::multi::EdgeInputDatum<distance_t> edge_weights;
groute::graphs::multi::NodeOutputGlobalDatum<distance_t> node_distances;
return runner(ngpus, edge_weights, node_distances);
}
| a591fb7ee24279b7155bd8e5f53e8b768f9aa8b0.cu | // Groute: An Asynchronous Multi-GPU Programming Framework
// http://www.github.com/groute/groute
// Copyright (c) 2017, A. Barak
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the names of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>
#include <gflags/gflags.h>
#include <groute/event_pool.h>
#include <groute/fused_distributed_worklist.h>
#include <groute/cta_work.h>
#include <groute/graphs/csr_graph.h>
#include <groute/graphs/traversal_algo.h>
#include <groute/graphs/fused_solver.h>
#include <utils/parser.h>
#include <utils/utils.h>
#include <utils/stopwatch.h>
#include "sssp_common.h"
DECLARE_int32(source_node);
namespace sssp {
namespace opt {
const distance_t INF = UINT_MAX;
struct DistanceData
{
index_t node;
distance_t distance;
__device__ __host__ __forceinline__ DistanceData(index_t node, distance_t distance) : node(node), distance(distance) { }
__device__ __host__ __forceinline__ DistanceData() : node(INF), distance(INF) { }
};
typedef index_t local_work_t;
typedef DistanceData remote_work_t;
__global__ void SSSPInit(distance_t* distances, int nnodes)
{
int tid = GTID;
if (tid < nnodes)
{
distances[tid] = INF;
}
}
template<
typename TGraph,
typename TWeightDatum, typename TDistanceDatum>
struct SSSPWorkNP
{
template<typename WorkSource>
__device__ static void work(
const WorkSource& work_source,
groute::dev::CircularWorklist<local_work_t>& rwl_in,
groute::dev::CircularWorklist<remote_work_t>& rwl_out,
const TGraph& graph,
TWeightDatum& edge_weights, TDistanceDatum& node_distances
)
{
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads)
{
groute::dev::np_local<distance_t> np_local = { 0, 0, 0 };
if (i < work_size)
{
index_t node = work_source.get_work(i);
np_local.start = graph.begin_edge(node);
np_local.size = graph.end_edge(node) - np_local.start;
np_local.meta_data = node_distances.get_item(node);
}
groute::dev::CTAWorkScheduler<distance_t>::template schedule(
np_local,
[&graph, &edge_weights, &node_distances, &rwl_in, &rwl_out](index_t edge, distance_t distance)
{
index_t dest = graph.edge_dest(edge);
distance_t weight = edge_weights.get_item(edge);
if (distance + weight < atomicMin(node_distances.get_item_ptr(dest), distance + weight))
{
int is_owned = graph.owns(dest);
// TODO: move ballot logic to a device structure
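                    // Warp-aggregated enqueue: the ballots split the active lanes into
                    // locally-owned and remote destinations, each group elects a leader via
                    // __ffs, and __popc gives every lane its offset so the whole group
                    // reserves worklist space with a single prepend/append.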
int owned_mask = __ballot(is_owned ? 1 : 0);
int remote_mask = __ballot(is_owned ? 0 : 1);
if (is_owned)
{
int high_leader = __ffs(owned_mask) - 1;
int thread_offset = __popc(owned_mask & ((1 << lane_id()) - 1));
rwl_in.prepend_warp(dest, high_leader, __popc(owned_mask), thread_offset);
}
else
{
int low_leader = __ffs(remote_mask) - 1;
int thread_offset = __popc(remote_mask & ((1 << lane_id()) - 1));
rwl_out.append_warp(DistanceData(dest, distance + weight), low_leader, __popc(remote_mask), thread_offset);
}
}
}
);
}
}
};
template<
typename TGraph,
typename TWeightDatum, typename TDistanceDatum>
struct SSSPWork
{
template<typename WorkSource>
__device__ static void work(
const WorkSource& work_source,
groute::dev::CircularWorklist<local_work_t>& rwl_in,
groute::dev::CircularWorklist<remote_work_t>& rwl_out,
const TGraph& graph,
TWeightDatum& edge_weights, TDistanceDatum& node_distances
)
{
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
for (uint32_t i = 0 + tid; i < work_size; i += nthreads)
{
index_t node = work_source.get_work(i);
distance_t distance = node_distances.get_item(node);
for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge)
{
index_t dest = graph.edge_dest(edge);
distance_t weight = edge_weights.get_item(edge);
if (distance + weight < atomicMin(node_distances.get_item_ptr(dest), distance + weight))
{
int is_owned = graph.owns(dest);
// TODO: move ballot logic to a device structure
int owned_mask = __ballot(is_owned ? 1 : 0);
int remote_mask = __ballot(is_owned ? 0 : 1);
if (is_owned)
{
int high_leader = __ffs(owned_mask) - 1;
int thread_offset = __popc(owned_mask & ((1 << lane_id()) - 1));
rwl_in.prepend_warp(dest, high_leader, __popc(owned_mask), thread_offset);
}
else
{
int low_leader = __ffs(remote_mask) - 1;
int thread_offset = __popc(remote_mask & ((1 << lane_id()) - 1));
rwl_out.append_warp(DistanceData(dest, distance + weight), low_leader, __popc(remote_mask), thread_offset);
}
}
}
}
}
};
struct SplitOps
{
private:
groute::graphs::dev::CSRGraphSeg m_graph_seg;
groute::graphs::dev::GraphDatum<distance_t> m_distances_datum;
public:
template<typename...UnusedData>
SplitOps(
const groute::graphs::dev::CSRGraphSeg& graph_seg,
const groute::graphs::dev::GraphDatumSeg<distance_t>& weights_datum,
const groute::graphs::dev::GraphDatum<distance_t>& distances_datum,
UnusedData&... data)
: m_graph_seg(graph_seg), m_distances_datum(distances_datum)
{
}
__device__ __forceinline__ groute::opt::SplitFlags on_receive(const remote_work_t& work)
{
if (m_graph_seg.owns(work.node))
{
return (work.distance < atomicMin(m_distances_datum.get_item_ptr(work.node), work.distance))
? groute::opt::SF_Take
: groute::opt::SF_None; // filter
}
return groute::opt::SF_Pass;
}
__device__ __forceinline__ bool is_high_prio(const local_work_t& work, const distance_t& global_prio)
{
return m_distances_datum[work] <= global_prio;
}
__device__ __forceinline__ groute::opt::SplitFlags on_send(local_work_t work)
{
return (m_graph_seg.owns(work))
? groute::opt::SF_Take
: groute::opt::SF_Pass;
}
__device__ __forceinline__ remote_work_t pack(local_work_t work)
{
return DistanceData(work, m_distances_datum.get_item(work));
}
__device__ __forceinline__ local_work_t unpack(const remote_work_t& work)
{
return work.node;
}
};
template<
typename TGraph,
template <typename> class TWeightDatum, template <typename> class TDistanceDatum
>
struct FusedProblem
{
TGraph m_graph;
TWeightDatum<distance_t> m_weights_datum;
TDistanceDatum<distance_t> m_distances_datum;
typedef SSSPWork<TGraph, TWeightDatum<distance_t>, TDistanceDatum<distance_t>> WorkType;
typedef SSSPWorkNP<TGraph, TWeightDatum<distance_t>, TDistanceDatum<distance_t>> WorkTypeNP;
public:
FusedProblem(const TGraph& graph, const TWeightDatum<distance_t>& weights_datum, const TDistanceDatum<distance_t>& distances_datum) :
m_graph(graph), m_weights_datum(weights_datum), m_distances_datum(distances_datum)
{
}
// Initial init. Called before a global CPU+GPU barrier
void Init(groute::Stream& stream) const
{
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, m_distances_datum.size);
SSSPInit << < grid_dims, block_dims, 0, stream.cuda_stream >> >(
m_distances_datum.data_ptr, m_distances_datum.size);
}
bool DoFusedInit(groute::Worklist<local_work_t>* lwl_high, groute::Worklist<local_work_t>* lwl_low,
groute::CircularWorklist<local_work_t>* rwl_in, groute::CircularWorklist<remote_work_t>* rwl_out,
int fused_chunk_size, distance_t global_prio,
volatile int *high_work_counter, volatile int *low_work_counter,
uint32_t *kernel_internal_counter, volatile int *send_signal_ptr,
cub::GridBarrierLifetime& barrier_lifetime,
dim3 grid_dims, dim3 block_dims, groute::Stream& stream)
{
return false; // no work was done here
}
void DoFusedWork(groute::Worklist<local_work_t>* lwl_high, groute::Worklist<local_work_t>* lwl_low,
groute::CircularWorklist<local_work_t>* rwl_in, groute::CircularWorklist<remote_work_t>* rwl_out,
int fused_chunk_size, distance_t global_prio,
volatile int *high_work_counter, volatile int *low_work_counter,
uint32_t *kernel_internal_counter, volatile int *send_signal_ptr,
cub::GridBarrierLifetime& barrier_lifetime,
dim3 grid_dims, dim3 block_dims, groute::Stream& stream)
{
if (FLAGS_iteration_fusion)
{
if (FLAGS_cta_np)
{
groute::FusedWork <
groute::NeverStop, local_work_t, remote_work_t, distance_t, SplitOps,
WorkTypeNP,
TGraph, TWeightDatum<distance_t>, TDistanceDatum<distance_t> >
<< < grid_dims, block_dims, 0, stream.cuda_stream >> > (
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
sssp::opt::SplitOps(m_graph, m_weights_datum, m_distances_datum),
m_graph, m_weights_datum, m_distances_datum
);
}
else
{
groute::FusedWork <
groute::NeverStop, local_work_t, remote_work_t, distance_t, SplitOps,
WorkType,
TGraph, TWeightDatum<distance_t>, TDistanceDatum<distance_t> >
<< < grid_dims, block_dims, 0, stream.cuda_stream >> > (
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
sssp::opt::SplitOps(m_graph, m_weights_datum, m_distances_datum),
m_graph, m_weights_datum, m_distances_datum
);
}
}
else
{
if (FLAGS_cta_np)
{
groute::FusedWork <
groute::RunNTimes<1>, local_work_t, remote_work_t, distance_t, SplitOps,
WorkTypeNP,
TGraph, TWeightDatum<distance_t>, TDistanceDatum<distance_t> >
<< < grid_dims, block_dims, 0, stream.cuda_stream >> > (
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
sssp::opt::SplitOps(m_graph, m_weights_datum, m_distances_datum),
m_graph, m_weights_datum, m_distances_datum
);
}
else
{
groute::FusedWork <
groute::RunNTimes<1>, local_work_t, remote_work_t, distance_t, SplitOps,
WorkType,
TGraph, TWeightDatum<distance_t>, TDistanceDatum<distance_t> >
<< < grid_dims, block_dims, 0, stream.cuda_stream >> > (
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
sssp::opt::SplitOps(m_graph, m_weights_datum, m_distances_datum),
m_graph, m_weights_datum, m_distances_datum
);
}
}
}
};
struct Algo
{
static const char* NameLower() { return "sssp"; }
static const char* Name() { return "SSSP"; }
static void Init(
groute::graphs::traversal::Context<sssp::opt::Algo>& context,
groute::graphs::multi::CSRGraphAllocator& graph_manager,
groute::router::Router<remote_work_t>& worklist_router,
groute::opt::DistributedWorklist<local_work_t, remote_work_t, SplitOps>& distributed_worklist)
{
index_t source_node = min(max(0, FLAGS_source_node), context.host_graph.nnodes - 1);
auto partitioner = graph_manager.GetGraphPartitioner();
if (partitioner->NeedsReverseLookup())
{
source_node = partitioner->GetReverseLookupFunc()(source_node);
}
// report the initial work
distributed_worklist.ReportHighPrioWork(1, 0, "Host", groute::Device::Host, true);
std::vector<remote_work_t> initial_work;
initial_work.push_back(remote_work_t(source_node, 0));
groute::router::ISender<remote_work_t>* work_sender = worklist_router.GetSender(groute::Device::Host);
work_sender->Send(
groute::Segment<remote_work_t>(&initial_work[0], 1), groute::Event());
work_sender->Shutdown();
}
template<
typename TGraphAllocator,
template <typename> class TWeightDatum, template <typename> class TDistanceDatum, typename...UnusedData>
static std::vector<distance_t> Gather(
TGraphAllocator& graph_allocator,
TWeightDatum<distance_t>& weights_datum, TDistanceDatum<distance_t>& distances_datum,
UnusedData&... data)
{
graph_allocator.GatherDatum(distances_datum);
return distances_datum.GetHostData();
}
template<
template <typename> class TWeightDatum,
template <typename> class TDistanceDatum,
typename...UnusedData>
static std::vector<distance_t> Host(
groute::graphs::host::CSRGraph& graph,
TWeightDatum<distance_t>& weights_datum, TDistanceDatum<distance_t>& distances_datum,
UnusedData&... data)
{
return SSSPHostNaive(graph, weights_datum.GetHostDataPtr(), min(max(0, FLAGS_source_node), graph.nnodes - 1));
}
static int Output(const char *file, const std::vector<distance_t>& distances)
{
return SSSPOutput(file, distances);
}
static int CheckErrors(const std::vector<distance_t>& distances, const std::vector<distance_t>& regression)
{
return SSSPCheckErrors(distances, regression);
}
};
}
}
bool TestSSSPAsyncMultiOptimized(int ngpus)
{
typedef sssp::opt::FusedProblem<groute::graphs::dev::CSRGraphSeg, groute::graphs::dev::GraphDatumSeg, groute::graphs::dev::GraphDatum> ProblemType;
typedef groute::graphs::traversal::FusedSolver<
sssp::opt::Algo, ProblemType,
sssp::opt::local_work_t , sssp::opt::remote_work_t, distance_t,
sssp::opt::SplitOps,
groute::graphs::dev::CSRGraphSeg, groute::graphs::dev::GraphDatumSeg<distance_t>, groute::graphs::dev::GraphDatum<distance_t>> SolverType;
groute::graphs::traversal::__MultiRunner__Opt__ <
sssp::opt::Algo,
ProblemType,
SolverType,
sssp::opt::SplitOps,
sssp::opt::local_work_t,
sssp::opt::remote_work_t,
groute::graphs::multi::EdgeInputDatum<distance_t>,
groute::graphs::multi::NodeOutputGlobalDatum<distance_t> > runner;
groute::graphs::multi::EdgeInputDatum<distance_t> edge_weights;
groute::graphs::multi::NodeOutputGlobalDatum<distance_t> node_distances;
return runner(ngpus, edge_weights, node_distances);
}
|
3db27bdba4e7e5ab5ab2aedbfd52b07099565b09.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef __HIPCC__
#include "Macro.h"
#else
#include "GAMER.h"
#endif
#include "CUPOT.h"
#ifdef GRAVITY
// soften length implementation
# define SOFTEN_PLUMMER
//# define SOFTEN_RUFFERT
//-----------------------------------------------------------------------------------------
// Function : CUPOT_ExternalAcc / CPU_ExternalAcc
// Description : Calculate the external acceleration at the given coordinates and time
//
// Note : 1. This function will be invoked by both CPU and GPU
// 2. "__forceinline__" is required since this device function will be invoked
// by more than one kernels (e.g., CUPOT_HydroGravitySolver, CUFLU_ComputeFlux)
// 3. The auxiliary array "UserArray" is set by "Init_ExternalAcc_Ptr", which
// points to "Init_ExternalAcc()" by default but may be overwritten by various
// test problem initializers
// 4. By default we assume
// UserArray[0] = x coordinate of the external acceleration center
// UserArray[1] = y ...
// UserArray[2] = z ..
// UserArray[3] = gravitational_constant*point_source_mass
// UserArray[4] = soften_length (<=0.0 --> disable)
// --> but one can easily modify this file to change the default behavior
// 5. Two different soften length implementations are supported
// --> SOFTEN_PLUMMER & SOFTEN_RUFFERT
//
// Parameter : Acc : Array to store the output external acceleration
// x/y/z : Target spatial coordinates
// Time : Current physical time
// UserArray : User-provided auxiliary array (set by "Init_ExternalAcc_Ptr")
//
// Return : Acc
//-----------------------------------------------------------------------------------------
#ifdef __HIPCC__
__forceinline__ __device__
void CUPOT_ExternalAcc( real Acc[], const double x, const double y, const double z, const double Time, const double UserArray[] )
#else
void CPU_ExternalAcc( real Acc[], const double x, const double y, const double z, const double Time, const double UserArray[] )
#endif
#ifndef EXTERNAL_NFW_POTENTIAL
{
const double Cen[3] = { UserArray[0], UserArray[1], UserArray[2] };
const real GM = (real)UserArray[3];
const real eps = (real)UserArray[4];
const real dx = (real)(x - Cen[0]);
const real dy = (real)(y - Cen[1]);
const real dz = (real)(z - Cen[2]);
const real r = SQRT( dx*dx + dy*dy + dz*dz );
// Plummer
# if ( defined SOFTEN_PLUMMER )
const real _r3 = ( eps <= (real)0.0 ) ? (real)1.0/CUBE(r) : POW( SQR(r)+SQR(eps), (real)-1.5 );
// Ruffert 1994
# elif ( defined SOFTEN_RUFFERT )
const real tmp = EXP( -SQR(r)/SQR(eps) );
const real _r3 = ( eps <= (real)0.0 ) ? (real)1.0/CUBE(r) : POW( SQR(r)+SQR(eps)*tmp, (real)-1.5 )*( (real)1.0 - tmp );
# else
const real _r3 = (real)1.0/CUBE(r);
# endif
Acc[0] = -GM*_r3*dx;
Acc[1] = -GM*_r3*dy;
Acc[2] = -GM*_r3*dz;
} // FUNCTION : CUPOT_ExternalAcc / CPU_ExternalAcc
#else
{
// Potential for a NFW Profile
//
const double Cen[3] = { UserArray[0], UserArray[1], UserArray[2] };
const real FourPiGrho = (real)UserArray[3];
const real dx = (real)(x - Cen[0]);
const real dy = (real)(y - Cen[1]);
const real dz = (real)(z - Cen[2]);
const real r = SQRT( dx*dx + dy*dy + dz*dz );
const real Rs = (real)UserArray[4];
const real eps = (real)UserArray[5] ;
// soften the potential around center
const real _r3 = ( eps <= (real)0.0 ) ? (real)1.0/CUBE(r) : POW( SQR(r)+SQR(eps), (real)-1.5 );
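   // NFW enclosed mass: M(<r) = 4*pi*rho0*Rs^3 * [ ln(1 + r/Rs) - r/(Rs + r) ].
   // Assuming UserArray[3] carries 4*pi*G*rho0 (as the name FourPiGrho suggests),
   // "force" below is G*M(<r), and multiplying by _r3*dx/dy/dz yields the radial
   // acceleration -G*M(<r)/r^2.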
const real force = FourPiGrho * CUBE(Rs) * ( LOG( 1.0 + r / Rs ) - r / (Rs + r) );
Acc[0] = -force*_r3*dx;
Acc[1] = -force*_r3*dy;
Acc[2] = -force*_r3*dz;
}
#endif // #ifdef EXTERNAL_NFW_POTENTIAL
#endif // #ifdef GRAVITY
| 3db27bdba4e7e5ab5ab2aedbfd52b07099565b09.cu | #ifdef __CUDACC__
#include "Macro.h"
#else
#include "GAMER.h"
#endif
#include "CUPOT.h"
#ifdef GRAVITY
// soften length implementation
# define SOFTEN_PLUMMER
//# define SOFTEN_RUFFERT
//-----------------------------------------------------------------------------------------
// Function : CUPOT_ExternalAcc / CPU_ExternalAcc
// Description : Calculate the external acceleration at the given coordinates and time
//
// Note : 1. This function will be invoked by both CPU and GPU
// 2. "__forceinline__" is required since this device function will be invoked
// by more than one kernels (e.g., CUPOT_HydroGravitySolver, CUFLU_ComputeFlux)
// 3. The auxiliary array "UserArray" is set by "Init_ExternalAcc_Ptr", which
// points to "Init_ExternalAcc()" by default but may be overwritten by various
// test problem initializers
// 4. By default we assume
// UserArray[0] = x coordinate of the external acceleration center
// UserArray[1] = y ...
// UserArray[2] = z ..
// UserArray[3] = gravitational_constant*point_source_mass
// UserArray[4] = soften_length (<=0.0 --> disable)
// --> but one can easily modify this file to change the default behavior
// 5. Two different soften length implementations are supported
// --> SOFTEN_PLUMMER & SOFTEN_RUFFERT
//
// Parameter : Acc : Array to store the output external acceleration
// x/y/z : Target spatial coordinates
// Time : Current physical time
// UserArray : User-provided auxiliary array (set by "Init_ExternalAcc_Ptr")
//
// Return : Acc
//-----------------------------------------------------------------------------------------
#ifdef __CUDACC__
__forceinline__ __device__
void CUPOT_ExternalAcc( real Acc[], const double x, const double y, const double z, const double Time, const double UserArray[] )
#else
void CPU_ExternalAcc( real Acc[], const double x, const double y, const double z, const double Time, const double UserArray[] )
#endif
#ifndef EXTERNAL_NFW_POTENTIAL
{
const double Cen[3] = { UserArray[0], UserArray[1], UserArray[2] };
const real GM = (real)UserArray[3];
const real eps = (real)UserArray[4];
const real dx = (real)(x - Cen[0]);
const real dy = (real)(y - Cen[1]);
const real dz = (real)(z - Cen[2]);
const real r = SQRT( dx*dx + dy*dy + dz*dz );
// Plummer
# if ( defined SOFTEN_PLUMMER )
const real _r3 = ( eps <= (real)0.0 ) ? (real)1.0/CUBE(r) : POW( SQR(r)+SQR(eps), (real)-1.5 );
// Ruffert 1994
# elif ( defined SOFTEN_RUFFERT )
const real tmp = EXP( -SQR(r)/SQR(eps) );
const real _r3 = ( eps <= (real)0.0 ) ? (real)1.0/CUBE(r) : POW( SQR(r)+SQR(eps)*tmp, (real)-1.5 )*( (real)1.0 - tmp );
# else
const real _r3 = (real)1.0/CUBE(r);
# endif
Acc[0] = -GM*_r3*dx;
Acc[1] = -GM*_r3*dy;
Acc[2] = -GM*_r3*dz;
} // FUNCTION : CUPOT_ExternalAcc / CPU_ExternalAcc
#else
{
// Potential for a NFW Profile
//
const double Cen[3] = { UserArray[0], UserArray[1], UserArray[2] };
const real FourPiGrho = (real)UserArray[3];
const real dx = (real)(x - Cen[0]);
const real dy = (real)(y - Cen[1]);
const real dz = (real)(z - Cen[2]);
const real r = SQRT( dx*dx + dy*dy + dz*dz );
const real Rs = (real)UserArray[4];
const real eps = (real)UserArray[5] ;
// soften the potential around center
const real _r3 = ( eps <= (real)0.0 ) ? (real)1.0/CUBE(r) : POW( SQR(r)+SQR(eps), (real)-1.5 );
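   // NFW enclosed mass: M(<r) = 4*pi*rho0*Rs^3 * [ ln(1 + r/Rs) - r/(Rs + r) ].
   // Assuming UserArray[3] carries 4*pi*G*rho0 (as the name FourPiGrho suggests),
   // "force" below is G*M(<r), and multiplying by _r3*dx/dy/dz yields the radial
   // acceleration -G*M(<r)/r^2.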
const real force = FourPiGrho * CUBE(Rs) * ( LOG( 1.0 + r / Rs ) - r / (Rs + r) );
Acc[0] = -force*_r3*dx;
Acc[1] = -force*_r3*dy;
Acc[2] = -force*_r3*dz;
}
#endif // #ifdef EXTERNAL_NFW_POTENTIAL
#endif // #ifdef GRAVITY
|
60ae0a956478380086973475e5a3a8bd292ed66f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Rayhana ZIARA
 * matrix-vector product
*/
#include <stdlib.h>
#include <stdio.h>
/*
 * DESCRIPTION : kernel computing the matrix-vector product
 * PARAMETERS  : matrix A, vector v, result vector r and the size of the vectors
 * RETURN      : /
*/
__global__ void matVect(float *A, float *v, float *r, int size)
{
float resultat = 0.0;
int index = blockIdx.x * blockDim.x + threadIdx.x;
    if(index >= size)
    {
        printf("ERROR - index out of range\n");
return;
}
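    // each thread accumulates one output element: r[index] = sum_i A(i, index) * v[i],
    // i.e. column `index` of the row-major A dotted with v (effectively r = A^T * v);
    // threadIdx.y / blockIdx.y do not enter `index`, so threads differing only in y
    // redundantly recompute the same element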
for(int i = 0; i < size; i++)
resultat += A[i * size + index] * v[i];
r[index] = resultat;
}
/*
 * DESCRIPTION : prints a matrix or a vector
 * PARAMETERS  : matrix/vector to print, number of rows and number of columns
 * RETURN      : /
*/
void affichage(float *M, int ligne, int colonne)
{
for(int i = 0; i < ligne; i++)
{
for(int j = 0; j < colonne; j++)
fprintf(stdout, "%lf\t", M[i * ligne + j]);
fprintf(stdout, "\n");
}
fprintf(stdout, "\n");
}
int main(int argc, char **argv)
{
    // variables for the matrix-vector product
    // host variables
    float *A, *v, *r;
    int n; // size of the matrix and of the vectors
    // device variables
float *d_A, *d_v, *d_r;
if(argc != 2)
{
        fprintf(stderr, "ERROR - Please pass the size of A as a command-line argument.\n./exam_rz n \n");
return -1;
}
    n = atoi(argv[1]); // size of the matrix A (n * n) and of the vector v (n)
    // host memory allocation for the matrix A and the vectors v and r
    A = (float*)malloc(n * n * sizeof(float));
    v = (float*)malloc(n * sizeof(float));
    r = (float*)malloc(n * sizeof(float));
    // initialise the matrix A (stored as a 1D row-major array) and the vector v
for(int i = 0; i < n; i++)
{
v[i] = i * n;
for(int j = 0; j < n; j++)
A[i * n + j] = i * n + j;
}
    // device memory allocation for the device-side copies of A, v and r
    hipMalloc((void**)&d_A, n * n * sizeof(float));
    hipMalloc((void**)&d_v, n * sizeof(float));
    hipMalloc((void**)&d_r, n * sizeof(float));
    // copy the matrix A and the vector v to the device
    hipMemcpy(d_A, A, n * n * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_v, v, n * sizeof(float), hipMemcpyHostToDevice);
    // kernel launch
    dim3 threads(4, 4); // 4*4 = 16 threads per block
dim3 blocks;
blocks.x = (n + threads.x - 1) / threads.x;
blocks.y = (n + threads.y - 1) / threads.y;
hipLaunchKernelGGL(( matVect), dim3(blocks), dim3(threads), 0, 0, d_A, d_v, d_r, n);
    // wait for all device threads to finish
    hipDeviceSynchronize();
    // copy the result vector r back to the host
    hipMemcpy(r, d_r, n * sizeof(float), hipMemcpyDeviceToHost);
    fprintf(stdout, "Matrix A\n");
    affichage(A, n, n);
    fprintf(stdout, "Vector v\n");
    affichage(v, 1, n);
    fprintf(stdout, "Vector r\n");
    affichage(r, 1, n);
    // free host memory
    free(A);
    free(v);
    free(r);
    // free device memory
hipFree(d_A);
hipFree(d_v);
hipFree(d_r);
return 0;
}
| 60ae0a956478380086973475e5a3a8bd292ed66f.cu | /*
* Rayhana ZIARA
 * matrix-vector product
*/
#include <stdlib.h>
#include <stdio.h>
/*
 * DESCRIPTION : kernel computing the matrix-vector product
 * PARAMETERS  : matrix A, vector v, result vector r and the size of the vectors
 * RETURN      : /
*/
__global__ void matVect(float *A, float *v, float *r, int size)
{
float resultat = 0.0;
int index = blockIdx.x * blockDim.x + threadIdx.x;
    if(index >= size)
    {
        printf("ERROR - index out of range\n");
return;
}
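    // each thread accumulates one output element: r[index] = sum_i A(i, index) * v[i],
    // i.e. column `index` of the row-major A dotted with v (effectively r = A^T * v);
    // threadIdx.y / blockIdx.y do not enter `index`, so threads differing only in y
    // redundantly recompute the same element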
for(int i = 0; i < size; i++)
resultat += A[i * size + index] * v[i];
r[index] = resultat;
}
/*
 * DESCRIPTION : prints a matrix or a vector
 * PARAMETERS  : matrix/vector to print, number of rows and number of columns
 * RETURN      : /
*/
void affichage(float *M, int ligne, int colonne)
{
for(int i = 0; i < ligne; i++)
{
for(int j = 0; j < colonne; j++)
fprintf(stdout, "%lf\t", M[i * ligne + j]);
fprintf(stdout, "\n");
}
fprintf(stdout, "\n");
}
int main(int argc, char **argv)
{
    // variables for the matrix-vector product
    // host variables
    float *A, *v, *r;
    int n; // size of the matrix and of the vectors
    // device variables
float *d_A, *d_v, *d_r;
if(argc != 2)
{
        fprintf(stderr, "ERROR - Please pass the size of A as a command-line argument.\n./exam_rz n \n");
return -1;
}
    n = atoi(argv[1]); // size of the matrix A (n * n) and of the vector v (n)
    // host memory allocation for the matrix A and the vectors v and r
    A = (float*)malloc(n * n * sizeof(float));
    v = (float*)malloc(n * sizeof(float));
    r = (float*)malloc(n * sizeof(float));
    // initialise the matrix A (stored as a 1D row-major array) and the vector v
for(int i = 0; i < n; i++)
{
v[i] = i * n;
for(int j = 0; j < n; j++)
A[i * n + j] = i * n + j;
}
    // device memory allocation for the device-side copies of A, v and r
    cudaMalloc((void**)&d_A, n * n * sizeof(float));
    cudaMalloc((void**)&d_v, n * sizeof(float));
    cudaMalloc((void**)&d_r, n * sizeof(float));
    // copy the matrix A and the vector v to the device
    cudaMemcpy(d_A, A, n * n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_v, v, n * sizeof(float), cudaMemcpyHostToDevice);
    // kernel launch
    dim3 threads(4, 4); // 4*4 = 16 threads per block
dim3 blocks;
blocks.x = (n + threads.x - 1) / threads.x;
blocks.y = (n + threads.y - 1) / threads.y;
matVect<<<blocks, threads>>>(d_A, d_v, d_r, n);
// attente de tous les threads
cudaThreadSynchronize();
// copie de la matrice equivalente C dans le hote
cudaMemcpy(r, d_r, n * sizeof(float), cudaMemcpyDeviceToHost);
fprintf(stdout, "Matrice A\n");
affichage(A, n, n);
fprintf(stdout, "Vecteur v\n");
affichage(v, 1, n);
fprintf(stdout, "Vecteur r\n");
affichage(r, 1, n);
// free host memory
free(A);
free(v);
free(r);
// free device memory
cudaFree(d_A);
cudaFree(d_v);
cudaFree(d_r);
return 0;
}
|
c5d0286da8697bb7cfdc18149e5f4d00d6eb26b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include "cpu_bitmap.h"
#include "bitmap_help.h"
#define CHANNELS 4
__global__ void rgbToGray(unsigned char *grayImage, int width, int height)
{
int rgbOffset= (blockIdx.x + blockIdx.y * gridDim.x) * CHANNELS;
printf("rgbOffset %d",rgbOffset);
unsigned char red = grayImage[rgbOffset];
unsigned char green = grayImage[rgbOffset + 1];
unsigned char blue = grayImage[rgbOffset + 2];
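// Weighted sum of the channels approximates perceived luminance
// (the weights 0.21/0.71/0.07 are close to the Rec. 709 luma coefficients).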
int grayOffset= 0.21f*red + 0.71f*green + 0.07f*blue;
grayImage[rgbOffset] = grayImage[rgbOffset + 1] = grayImage[rgbOffset + 2]= grayOffset;
__syncthreads();
}
__host__ void imgProc(unsigned char *map, int size, int width, int height) {
unsigned char* grayImage;
size_t imgSize = size;
hipMalloc((void**)&grayImage,imgSize);
hipMemcpy(grayImage,map,imgSize,hipMemcpyHostToDevice);
const dim3 gridSize(width,height,1);
const dim3 blockSize(1,1,1);
hipLaunchKernelGGL(( rgbToGray), dim3(gridSize),dim3(blockSize), 0, 0, grayImage, width, height);
hipDeviceSynchronize();
hipMemcpy(map,grayImage,imgSize,hipMemcpyDeviceToHost);
return;
}
int main(void) {
char fname[50];
FILE* infile;
unsigned short ftype;
tagBMFH bitHead;
tagBMIH bitInfoHead;
tagRGBQ *pRgb;
printf("Please enter the .bmp file name: ");
scanf("%s", fname);
strcat(fname,".bmp");
infile = fopen(fname, "rb");
if (infile != NULL) {
printf("File open successful.\n");
fread(&ftype, 1, sizeof(unsigned short), infile);
if (ftype != 0x4d42)
{
printf("File not .bmp format.\n");
return 1;
}
fread(&bitHead, 1, sizeof(tagBMFH), infile);
fread(&bitInfoHead, 1, sizeof(tagBMIH), infile);
}
else {
printf("File open fail.\n");
return 1;
}
if (bitInfoHead.biBitCount < 24) {
long nPlateNum = long(pow(2, double(bitInfoHead.biBitCount)));
pRgb = (tagRGBQ *)malloc(nPlateNum * sizeof(tagRGBQ));
memset(pRgb, 0, nPlateNum * sizeof(tagRGBQ));
int num = fread(pRgb, 4, nPlateNum, infile);
}
int width = bitInfoHead.biWidth;
int height = bitInfoHead.biHeight;
int l_width = 4 * ((width * bitInfoHead.biBitCount + 31) / 32);
long nData = height * l_width;
unsigned char *pColorData = (unsigned char *)malloc(nData);
memset(pColorData, 0, nData);
fread(pColorData, 1, nData, infile);
fclose(infile);
CPUBitmap dataOfBmp(width, height);
unsigned char *map = dataOfBmp.get_ptr();
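// For bit depths below 24 each stored pixel value is treated as an index into the
// color table pRgb and expanded to RGBA in map; 24-bit pixels are copied into map directly.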
if (bitInfoHead.biBitCount < 24) {
int k, index = 0;
if (bitInfoHead.biBitCount == 1) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j / 8;
mixIndex = pColorData[k];
if (j % 8 < 7) mixIndex = mixIndex << (7 - (j % 8));
mixIndex = mixIndex >> 7;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 2) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j / 4;
mixIndex = pColorData[k];
if (j % 4 < 3) mixIndex = mixIndex << (6 - 2 * (j % 4));
mixIndex = mixIndex >> 6;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 4) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j / 2;
mixIndex = pColorData[k];
if (j % 2 == 0) mixIndex = mixIndex << 4;
mixIndex = mixIndex >> 4;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 8) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j;
mixIndex = pColorData[k];
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 16) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j * 2;
unsigned char shortTemp = pColorData[k + 1] << 8;
mixIndex = pColorData[k] + shortTemp;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
}
else {
int k, index = 0;
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
k = i * l_width + j * 3;
map[index * 4 + 0] = pColorData[k + 2];
map[index * 4 + 1] = pColorData[k + 1];
map[index * 4 + 2] = pColorData[k];
index++;
}
}
imgProc(map, dataOfBmp.image_size(), width, height);
dataOfBmp.display_and_exit();
return 0;
} | c5d0286da8697bb7cfdc18149e5f4d00d6eb26b1.cu | #include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include "cpu_bitmap.h"
#include "bitmap_help.h"
#define CHANNELS 4
__global__ void rgbToGray(unsigned char *grayImage, int width, int height)
{
int rgbOffset= (blockIdx.x + blockIdx.y * gridDim.x) * CHANNELS;
printf("rgbOffset %d",rgbOffset);
unsigned char red = grayImage[rgbOffset];
unsigned char green = grayImage[rgbOffset + 1];
unsigned char blue = grayImage[rgbOffset + 2];
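// Weighted sum of the channels approximates perceived luminance
// (the weights 0.21/0.71/0.07 are close to the Rec. 709 luma coefficients).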
int grayOffset= 0.21f*red + 0.71f*green + 0.07f*blue;
grayImage[rgbOffset] = grayImage[rgbOffset + 1] = grayImage[rgbOffset + 2]= grayOffset;
__syncthreads();
}
__host__ void imgProc(unsigned char *map, int size, int width, int height) {
unsigned char* grayImage;
size_t imgSize = size;
cudaMalloc((void**)&grayImage,imgSize);
cudaMemcpy(grayImage,map,imgSize,cudaMemcpyHostToDevice);
const dim3 gridSize(width,height,1);
const dim3 blockSize(1,1,1);
rgbToGray<<<gridSize,blockSize>>>(grayImage, width, height);
cudaDeviceSynchronize();
cudaMemcpy(map,grayImage,imgSize,cudaMemcpyDeviceToHost);
return;
}
int main(void) {
char fname[50];
FILE* infile;
unsigned short ftype;
tagBMFH bitHead;
tagBMIH bitInfoHead;
tagRGBQ *pRgb;
printf("Please enter the .bmp file name: ");
scanf("%s", fname);
strcat(fname,".bmp");
infile = fopen(fname, "rb");
if (infile != NULL) {
printf("File open successful.\n");
fread(&ftype, 1, sizeof(unsigned short), infile);
if (ftype != 0x4d42)
{
printf("File not .bmp format.\n");
return 1;
}
fread(&bitHead, 1, sizeof(tagBMFH), infile);
fread(&bitInfoHead, 1, sizeof(tagBMIH), infile);
}
else {
printf("File open fail.\n");
return 1;
}
if (bitInfoHead.biBitCount < 24) {
long nPlateNum = long(pow(2, double(bitInfoHead.biBitCount)));
pRgb = (tagRGBQ *)malloc(nPlateNum * sizeof(tagRGBQ));
memset(pRgb, 0, nPlateNum * sizeof(tagRGBQ));
int num = fread(pRgb, 4, nPlateNum, infile);
}
int width = bitInfoHead.biWidth;
int height = bitInfoHead.biHeight;
int l_width = 4 * ((width * bitInfoHead.biBitCount + 31) / 32);
long nData = height * l_width;
unsigned char *pColorData = (unsigned char *)malloc(nData);
memset(pColorData, 0, nData);
fread(pColorData, 1, nData, infile);
fclose(infile);
CPUBitmap dataOfBmp(width, height);
unsigned char *map = dataOfBmp.get_ptr();
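// For bit depths below 24 each stored pixel value is treated as an index into the
// color table pRgb and expanded to RGBA in map; 24-bit pixels are copied into map directly.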
if (bitInfoHead.biBitCount < 24) {
int k, index = 0;
if (bitInfoHead.biBitCount == 1) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j / 8;
mixIndex = pColorData[k];
if (j % 8 < 7) mixIndex = mixIndex << (7 - (j % 8));
mixIndex = mixIndex >> 7;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 2) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j / 4;
mixIndex = pColorData[k];
if (j % 4 < 3) mixIndex = mixIndex << (6 - 2 * (j % 4));
mixIndex = mixIndex >> 6;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 4) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j / 2;
mixIndex = pColorData[k];
if (j % 2 == 0) mixIndex = mixIndex << 4;
mixIndex = mixIndex >> 4;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 8) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j;
mixIndex = pColorData[k];
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 16) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j * 2;
unsigned char shortTemp = pColorData[k + 1] << 8;
mixIndex = pColorData[k] + shortTemp;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
}
else {
int k, index = 0;
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
k = i * l_width + j * 3;
map[index * 4 + 0] = pColorData[k + 2];
map[index * 4 + 1] = pColorData[k + 1];
map[index * 4 + 2] = pColorData[k];
index++;
}
}
imgProc(map, dataOfBmp.image_size(), width, height);
dataOfBmp.display_and_exit();
return 0;
} |
68535c7b2388fc53180ca3af4a5fed88a530df3b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2016 Maximilian Knespel
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "cudaVectorReduce.hpp"
#include <cassert>
#include <cstdint> // uint64_t
#include <limits> // lowest
#include <cmath>
#include <hip/hip_runtime.h> // atomicCAS
#include <hipfft.h> // hipfftComplex, hipfftDoubleComplex
#include "libs/cudacommon.h"
namespace imresh
{
namespace algorithms
{
namespace cuda
{
SumFunctor<float > sumFunctorf;
MinFunctor<float > minFunctorf;
MaxFunctor<float > maxFunctorf;
SumFunctor<double> sumFunctord;
MinFunctor<double> minFunctord;
MaxFunctor<double> maxFunctord;
template<class T_PREC, class T_FUNC>
__device__ inline void atomicFunc
(
T_PREC * const rdpTarget,
const T_PREC rValue,
T_FUNC f
)
{
/* atomicCAS only is defined for int and long long int, thats why we
* need these roundabout casts */
int assumed;
int old = * (int*) rdpTarget;
/* atomicCAS returns the value with which the current value 'assumed'
* was compared. If the value changed between reading out to assumed
* and calculating the reduced value and storing it back, then we
* need to call this function again. (I hope the GPU has some
* functionality to prevent synchronized i.e. neverending races ... */
do
{
assumed = old;
/* If the reduced value doesn't change, then we don't need to hinder
* other threads with atomicCAS. This additional check may prove a
* bottleneck, if this is rarely the case, e.g. for sum and no 0s or
* for max and an ordered list, where the largest is the last
* element. In tests this more often slowed down the calculation */
//if ( f( __int_as_float(assumed), rValue ) == assumed )
// break;
/* compare and swap after the value was read into assumed; atomicCAS returns the
* old value. If assumed is no longer the value at rdpTarget,
* then we will have to try again to write it */
old = atomicCAS( (int*) rdpTarget, assumed,
__float_as_int( f( __int_as_float(assumed), rValue ) ) );
}
while ( assumed != old );
}
template<>
__device__ inline void atomicFunc<int,MaxFunctor<int>>
(
int * const rdpTarget,
const int rValue,
MaxFunctor<int> f
)
{
atomicMax( rdpTarget, rValue );
}
/*
// seems to work for testVectorReduce, but it shouldn't oO, maybe just good numbers, or because this is only for max, maybe it wouldn't work for min, because the maximum is > 0 ... In the end it isn't faster than atomicCAS and it doesn't even use floatAsOrderdInt yet, which would make use of bitshift, subtraction and logical or, thereby decreasing performance even more: http://stereopsis.com/radix.html
template<>
__device__ inline void atomicFunc<float,MaxFunctor<float>>
(
float * const rdpTarget,
const float rValue,
MaxFunctor<float> f
)
{
atomicMax( (int*)rdpTarget, __float_as_int(rValue) );
}*/
template<class T_PREC, class T_FUNC>
__global__ void kernelVectorReduceShared
(
const T_PREC * const rdpData,
const unsigned rnData,
T_PREC * const rdpResult,
T_FUNC f,
const T_PREC rInitValue
)
{
assert( blockDim.y == 1 );
assert( blockDim.z == 1 );
assert( gridDim.y == 1 );
assert( gridDim.z == 1 );
const int32_t nTotalThreads = gridDim.x * blockDim.x;
int32_t i = blockIdx.x * blockDim.x + threadIdx.x;
assert( i < nTotalThreads );
T_PREC localReduced = T_PREC(rInitValue);
for ( ; i < rnData; i += nTotalThreads )
localReduced = f( localReduced, rdpData[i] );
__shared__ T_PREC smReduced;
/* master thread of every block shall set shared mem variable to 0 */
__syncthreads();
if ( threadIdx.x == 0 )
smReduced = T_PREC(rInitValue);
__syncthreads();
atomicFunc( &smReduced, localReduced, f );
__syncthreads();
if ( threadIdx.x == 0 )
atomicFunc( rdpResult, smReduced, f );
}
/**
* benchmarks suggest that this kernel is twice as fast as
* kernelVectorReduceShared
**/
template<class T_PREC, class T_FUNC>
__global__ void kernelVectorReduceSharedMemoryWarps
(
const T_PREC * const rdpData,
const unsigned rnData,
T_PREC * const rdpResult,
T_FUNC f,
const T_PREC rInitValue
)
{
assert( blockDim.y == 1 );
assert( blockDim.z == 1 );
assert( gridDim.y == 1 );
assert( gridDim.z == 1 );
const int32_t nTotalThreads = gridDim.x * blockDim.x;
int32_t i = blockIdx.x * blockDim.x + threadIdx.x;
assert( i < nTotalThreads );
T_PREC localReduced = T_PREC(rInitValue);
for ( ; i < rnData; i += nTotalThreads )
localReduced = f( localReduced, rdpData[i] );
/**
* reduce per warp:
* With __shfl_down we can read the register values of other lanes in
* a warp. In the first iteration lane 0 will add to its value the
* value of lane 16, lane 1 from lane 17 and so on.
* In the next step lane 0 will add the result from lane 8.
* In the end lane 0 will have the reduced value.
* @see http://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/
**/
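/* Illustrative trace (editorial note, not from the original source): for a
* hypothetical 4-lane warp holding [a, b, c, d] and a sum functor,
* warpDelta = 2 leaves lane 0 with a+c and lane 1 with b+d,
* warpDelta = 1 then leaves lane 0 with (a+c)+(b+d);
* after log2(warpSize) steps lane 0 holds the reduction of the whole warp and
* the values left in the other lanes are simply ignored. */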
constexpr int warpSize = 32;
const int32_t laneId = threadIdx.x % warpSize;
for ( int32_t warpDelta = warpSize / 2; warpDelta > 0; warpDelta /= 2)
localReduced = f( localReduced, __shfl_down( localReduced, warpDelta ) );
__shared__ T_PREC smReduced;
/* master thread of every block shall set shared mem variable to 0 */
__syncthreads();
if ( threadIdx.x == 0 )
smReduced = T_PREC(rInitValue);
__syncthreads();
if ( laneId == 0 )
atomicFunc( &smReduced, localReduced, f );
__syncthreads();
if ( threadIdx.x == 0 )
atomicFunc( rdpResult, smReduced, f );
}
template<class T_PREC, class T_FUNC>
__global__ void kernelVectorReduceWarps
(
const T_PREC * const rdpData,
const unsigned rnData,
T_PREC * const rdpResult,
T_FUNC f,
const T_PREC rInitValue
)
{
assert( blockDim.y == 1 );
assert( blockDim.z == 1 );
assert( gridDim.y == 1 );
assert( gridDim.z == 1 );
const int32_t nTotalThreads = gridDim.x * blockDim.x;
int32_t i = blockIdx.x * blockDim.x + threadIdx.x;
assert( i < nTotalThreads );
T_PREC localReduced = T_PREC(rInitValue);
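/* grid-stride loop: thread i reduces elements i, i + nTotalThreads, i + 2*nTotalThreads, ...
* so a fixed-size grid can handle any rnData */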
for ( ; i < rnData; i += nTotalThreads )
localReduced = f( localReduced, rdpData[i] );
/* reduce per warp (warpSize == 32 assumed) */
const int32_t laneId = threadIdx.x % 32;
#pragma unroll
for ( int32_t warpDelta = 32 / 2; warpDelta > 0; warpDelta /= 2)
localReduced = f( localReduced, __shfl_down( localReduced, warpDelta ) );
if ( laneId == 0 )
atomicFunc( rdpResult, localReduced, f );
}
template<class T_PREC, class T_FUNC>
T_PREC cudaReduce
(
const T_PREC * const rdpData,
const unsigned rnElements,
T_FUNC f,
const T_PREC rInitValue,
hipStream_t rStream
)
{
const unsigned nThreads = 128;
//const unsigned nBlocks = ceil( (float) rnElements / nThreads );
//printf( "nThreads = %i, nBlocks = %i\n", nThreads, nBlocks );
const unsigned nBlocks = 288;
/* 256*256 = 65536 concurrent threads should fill most modern graphics
* cards. E.g. a GTX 760 can only handle 12288 running concurrently,
* everything else will be run after some threads finished. The
* number of kernels is only 384, because of oversubscription with
* warps */
assert( nBlocks < 65536 );
T_PREC reducedValue;
T_PREC * dpReducedValue;
T_PREC initValue = rInitValue;
CUDA_ERROR( hipMalloc( (void**) &dpReducedValue, sizeof(float) ) );
CUDA_ERROR( hipMemcpyAsync( dpReducedValue, &initValue, sizeof(float), hipMemcpyHostToDevice, rStream ) );
/* memcpy is on the same stream as kernel will be, so no synchronize needed! */
hipLaunchKernelGGL(( kernelVectorReduceWarps), dim3(nBlocks), dim3(nThreads), 0, rStream ,
rdpData, rnElements, dpReducedValue, f, rInitValue );
CUDA_ERROR( hipStreamSynchronize( rStream ) );
CUDA_ERROR( hipMemcpyAsync( &reducedValue, dpReducedValue, sizeof(float), hipMemcpyDeviceToHost, rStream ) );
CUDA_ERROR( hipStreamSynchronize( rStream) );
CUDA_ERROR( hipFree( dpReducedValue ) );
return reducedValue;
}
template<class T_PREC, class T_FUNC>
T_PREC cudaReduceSharedMemory
(
const T_PREC * const rdpData,
const unsigned rnElements,
T_FUNC f,
const T_PREC rInitValue,
hipStream_t rStream
)
{
/* the more threads we have the longer the reduction will be
* done inside shared memory instead of global memory */
const unsigned nThreads = 256;
const unsigned nBlocks = 256;
assert( nBlocks < 65536 );
T_PREC reducedValue;
T_PREC * dpReducedValue;
T_PREC initValue = rInitValue;
CUDA_ERROR( hipMalloc( (void**) &dpReducedValue, sizeof(float) ) );
CUDA_ERROR( hipMemcpyAsync( dpReducedValue, &initValue, sizeof(float), hipMemcpyHostToDevice, rStream ) );
/* memcpy is on the same stream as kernel will be, so no synchronize needed! */
hipLaunchKernelGGL(( kernelVectorReduceShared), dim3(nBlocks), dim3(nThreads), 0, rStream ,
rdpData, rnElements, dpReducedValue, f, rInitValue );
CUDA_ERROR( hipStreamSynchronize( rStream ) );
CUDA_ERROR( hipMemcpyAsync( &reducedValue, dpReducedValue, sizeof(float), hipMemcpyDeviceToHost, rStream ) );
CUDA_ERROR( hipStreamSynchronize( rStream) );
CUDA_ERROR( hipFree( dpReducedValue ) );
return reducedValue;
}
template<class T_PREC, class T_FUNC>
T_PREC cudaReduceSharedMemoryWarps
(
const T_PREC * const rdpData,
const unsigned rnElements,
T_FUNC f,
const T_PREC rInitValue,
hipStream_t rStream
)
{
const unsigned nThreads = 256;
const unsigned nBlocks = 256;
assert( nBlocks < 65536 );
T_PREC reducedValue;
T_PREC * dpReducedValue;
T_PREC initValue = rInitValue;
CUDA_ERROR( hipMalloc( (void**) &dpReducedValue, sizeof(float) ) );
CUDA_ERROR( hipMemcpyAsync( dpReducedValue, &initValue, sizeof(float), hipMemcpyHostToDevice, rStream ) );
/* memcpy is on the same stream as kernel will be, so no synchronize needed! */
hipLaunchKernelGGL(( kernelVectorReduceSharedMemoryWarps), dim3(nBlocks), dim3(nThreads), 0, rStream ,
rdpData, rnElements, dpReducedValue, f, rInitValue );
CUDA_ERROR( hipStreamSynchronize( rStream ) );
CUDA_ERROR( hipMemcpyAsync( &reducedValue, dpReducedValue, sizeof(float), hipMemcpyDeviceToHost, rStream ) );
CUDA_ERROR( hipStreamSynchronize( rStream) );
CUDA_ERROR( hipFree( dpReducedValue ) );
return reducedValue;
}
template<class T_PREC>
T_PREC cudaVectorMin
(
const T_PREC * const rdpData,
const unsigned rnElements,
hipStream_t rStream
)
{
MinFunctor<T_PREC> minFunctor;
return cudaReduce( rdpData, rnElements, minFunctor, std::numeric_limits<T_PREC>::max(), rStream );
}
template<class T_PREC>
T_PREC cudaVectorMax
(
const T_PREC * const rdpData,
const unsigned rnElements,
hipStream_t rStream
)
{
MaxFunctor<T_PREC> maxFunctor;
return cudaReduce( rdpData, rnElements, maxFunctor, std::numeric_limits<T_PREC>::lowest(), rStream );
}
template<class T_PREC>
T_PREC cudaVectorSum
(
const T_PREC * const rdpData,
const unsigned rnElements,
hipStream_t rStream
)
{
SumFunctor<T_PREC> sumFunctor;
return cudaReduce( rdpData, rnElements, sumFunctor, T_PREC(0), rStream );
}
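/* Editorial usage sketch (not part of the original file): how these host-side
* wrappers might be called. The names dpData, n and stream are placeholders
* introduced for the example only.
*
* float * dpData = ...; // device buffer with n elements, filled elsewhere
* hipStream_t stream;
* hipStreamCreate( &stream );
* float total = imresh::algorithms::cuda::cudaVectorSum( dpData, n, stream );
* float peak = imresh::algorithms::cuda::cudaVectorMax( dpData, n, stream );
* hipStreamDestroy( stream );
*/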
/* These functions only persist for benchmarking purposes to show that
* the standard version is the fastest */
template<class T_PREC>
T_PREC cudaVectorMaxSharedMemory
(
const T_PREC * const rdpData,
const unsigned rnElements,
hipStream_t rStream
)
{
MaxFunctor<T_PREC> maxFunctor;
return cudaReduceSharedMemory( rdpData, rnElements, maxFunctor, std::numeric_limits<T_PREC>::lowest(), rStream );
}
template<class T_PREC>
T_PREC cudaVectorMaxSharedMemoryWarps
(
const T_PREC * const rdpData,
const unsigned rnElements,
hipStream_t rStream
)
{
MaxFunctor<T_PREC> maxFunctor;
return cudaReduceSharedMemoryWarps( rdpData, rnElements, maxFunctor, std::numeric_limits<T_PREC>::lowest(), rStream );
}
/**
* "For the input-output algorithms the error E_F is
* usually meaningless since the input g_k(X) is no longer
* an estimate of the object. Then the meaningful error
* is the object-domain error E_0 given by Eq. (15)."
* (Fienup82)
* Eq.15:
* @f[ E_{0k}^2 = \sum\limits_{x\in\gamma} |g_k'(x)|^2 @f]
* where \gamma is the domain at which the constraints are
* not met. So this is the sum over the domain which should
* be 0.
*
* Eq.16:
* @f[ E_{Fk}^2 = \sum\limits_{u} |G_k(u) - G_k'(u)|^2 / N^2
= \sum_x |g_k(x) - g_k'(x)|^2 @f]
**/
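/* Editorial note: in the kernel below rdpTotalError accumulates |g_k'(x)|^2 over
* exactly those pixels that violate the constraint (the domain \gamma of Eq. 15),
* while rdpnMaskedPixels counts those pixels; the host wrapper calculateHioError
* then reduces both to a single scalar error value. */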
template< class T_COMPLEX, class T_MASK_ELEMENT >
__global__ void cudaKernelCalculateHioError
(
const T_COMPLEX * const rdpgPrime,
const T_MASK_ELEMENT * const rdpIsMasked,
const unsigned rnData,
const bool rInvertMask,
float * const rdpTotalError,
float * const rdpnMaskedPixels
)
{
assert( blockDim.y == 1 );
assert( blockDim.z == 1 );
assert( gridDim.y == 1 );
assert( gridDim.z == 1 );
const int32_t nTotalThreads = gridDim.x * blockDim.x;
int32_t i = blockIdx.x * blockDim.x + threadIdx.x;
assert( i < nTotalThreads );
float localTotalError = 0;
float localnMaskedPixels = 0;
for ( ; i < rnData; i += nTotalThreads )
{
const auto & re = rdpgPrime[i].x;
const auto & im = rdpgPrime[i].y;
/* only add up norm where no object should be (rMask == 0) */
/* note: invert + masked -> unmasked <=> 1 ? 1 -> 0
* noinvert + masked -> masked <=> 0 ? 1 -> 1
* invert + unmasked -> masked <=> 1 ? 0 -> 1
* noinvert + unmasked -> unmasked <=> 0 ? 0 -> 0
* => ? is xor => no thread divergence
*/
assert( rdpIsMasked[i] == 0 or rdpIsMasked[i] == 1 );
const bool shouldBeZero = rInvertMask xor (bool) rdpIsMasked[i];
assert( rdpIsMasked[i] >= 0.0 and rdpIsMasked[i] <= 1.0 );
//float shouldBeZero = rInvertMask + ( 1-2*rInvertMask )*rdpIsMasked[i];
/*
float shouldBeZero = rdpIsMasked[i];
if ( rInvertMask )
shouldBeZero = 1 - shouldBeZero;
*/
localTotalError += shouldBeZero * ( re*re+im*im );
localnMaskedPixels += shouldBeZero;
}
/* reduce per warp (warpSize == 32 assumed) */
const int32_t laneId = threadIdx.x % 32;
#pragma unroll
for ( int32_t warpDelta = 32 / 2; warpDelta > 0; warpDelta /= 2 )
{
localTotalError += __shfl_down( localTotalError , warpDelta );
localnMaskedPixels += __shfl_down( localnMaskedPixels, warpDelta );
}
SumFunctor<float> sum;
if ( laneId == 0 )
{
atomicFunc( rdpTotalError , localTotalError , sum );
atomicFunc( rdpnMaskedPixels, localnMaskedPixels, sum );
}
}
template<class T_COMPLEX, class T_MASK_ELEMENT>
float calculateHioError
(
const T_COMPLEX * const & rdpData,
const T_MASK_ELEMENT * const & rdpIsMasked,
const unsigned & rnElements,
const bool & rInvertMask,
hipStream_t rStream
)
{
const unsigned nThreads = 256;
//const unsigned nBlocks = ceil( (float) rnElements / nThreads );
const unsigned nBlocks = 256;
assert( nBlocks < 65536 );
float totalError, nMaskedPixels;
float * dpTotalError, * dpnMaskedPixels;
CUDA_ERROR( hipMalloc( (void**) &dpTotalError , sizeof(float) ) );
CUDA_ERROR( hipMalloc( (void**) &dpnMaskedPixels, sizeof(float) ) );
CUDA_ERROR( hipMemsetAsync( dpTotalError , 0, sizeof(float), rStream ) );
CUDA_ERROR( hipMemsetAsync( dpnMaskedPixels, 0, sizeof(float), rStream ) );
/* memset is on the same stream as kernel will be, so no synchronize needed! */
hipLaunchKernelGGL(( cudaKernelCalculateHioError), dim3(nBlocks), dim3(nThreads), 0, rStream ,
rdpData, rdpIsMasked, rnElements, rInvertMask, dpTotalError, dpnMaskedPixels );
CUDA_ERROR( hipStreamSynchronize( rStream ) );
CUDA_ERROR( hipMemcpyAsync( &totalError , dpTotalError , sizeof(float), hipMemcpyDeviceToHost, rStream ) );
CUDA_ERROR( hipMemcpyAsync( &nMaskedPixels, dpnMaskedPixels, sizeof(float), hipMemcpyDeviceToHost, rStream ) );
CUDA_ERROR( hipStreamSynchronize( rStream ) );
CUDA_ERROR( hipFree( dpTotalError ) );
CUDA_ERROR( hipFree( dpnMaskedPixels ) );
return sqrtf(totalError) / nMaskedPixels;
}
/* explicit instantiations */
template
float cudaVectorMin<float>
(
const float * const rdpData,
const unsigned rnElements,
hipStream_t rStream
);
template
double cudaVectorMin<double>
(
const double * const rdpData,
const unsigned rnElements,
hipStream_t rStream
);
template
float cudaVectorMax<float>
(
const float * const rdpData,
const unsigned rnElements,
hipStream_t rStream
);
template
double cudaVectorMax<double>
(
const double * const rdpData,
const unsigned rnElements,
hipStream_t rStream
);
template
float cudaVectorSum<float>
(
const float * const rdpData,
const unsigned rnElements,
hipStream_t rStream
);
template
double cudaVectorSum<double>
(
const double * const rdpData,
const unsigned rnElements,
hipStream_t rStream
);
template
__global__ void cudaKernelCalculateHioError
<hipfftComplex, float>
(
const hipfftComplex * const rdpgPrime,
const float * const rdpIsMasked,
const unsigned rnData,
const bool rInvertMask,
float * const rdpTotalError,
float * const rdpnMaskedPixels
);
template
float calculateHioError
<hipfftComplex, float>
(
const hipfftComplex * const & rdpData,
const float * const & rdpIsMasked,
const unsigned & rnElements,
const bool & rInvertMask,
hipStream_t rStream
);
template
float cudaVectorMaxSharedMemory<float>
(
const float * const rdpData,
const unsigned rnElements,
hipStream_t rStream
);
template
float cudaVectorMaxSharedMemoryWarps<float>
(
const float * const rdpData,
const unsigned rnElements,
hipStream_t rStream
);
} // namespace cuda
} // namespace algorithms
} // namespace imresh
| 68535c7b2388fc53180ca3af4a5fed88a530df3b.cu | /*
* The MIT License (MIT)
*
* Copyright (c) 2015-2016 Maximilian Knespel
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "cudaVectorReduce.hpp"
#include <cassert>
#include <cstdint> // uint64_t
#include <limits> // lowest
#include <cmath>
#include <cuda.h> // atomicCAS
#include <cufft.h> // cufftComplex, cufftDoubleComplex
#include "libs/cudacommon.h"
namespace imresh
{
namespace algorithms
{
namespace cuda
{
SumFunctor<float > sumFunctorf;
MinFunctor<float > minFunctorf;
MaxFunctor<float > maxFunctorf;
SumFunctor<double> sumFunctord;
MinFunctor<double> minFunctord;
MaxFunctor<double> maxFunctord;
template<class T_PREC, class T_FUNC>
__device__ inline void atomicFunc
(
T_PREC * const rdpTarget,
const T_PREC rValue,
T_FUNC f
)
{
/* atomicCAS only is defined for int and long long int, thats why we
* need these roundabout casts */
int assumed;
int old = * (int*) rdpTarget;
/* atomicCAS returns the value with which the current value 'assumed'
* was compared. If the value changed between reading out to assumed
* and calculating the reduced value and storing it back, then we
* need to call this function again. (I hope the GPU has some
* functionality to prevent synchronized i.e. neverending races ... */
do
{
assumed = old;
/* If the reduced value doesn't change, then we don't need to hinder
* other threads with atomicCAS. This additional check may prove a
* bottleneck, if this is rarely the case, e.g. for sum and no 0s or
* for max and an ordered list, where the largest is the last
* element. In tests this more often slowed down the calculation */
//if ( f( __int_as_float(assumed), rValue ) == assumed )
// break;
/* compare and swap after the value was read into assumed; atomicCAS returns the
* old value. If assumed is no longer the value at rdpTarget,
* then we will have to try again to write it */
old = atomicCAS( (int*) rdpTarget, assumed,
__float_as_int( f( __int_as_float(assumed), rValue ) ) );
}
while ( assumed != old );
}
template<>
__device__ inline void atomicFunc<int,MaxFunctor<int>>
(
int * const rdpTarget,
const int rValue,
MaxFunctor<int> f
)
{
atomicMax( rdpTarget, rValue );
}
/*
// seems to work for testVectorReduce, but it shouldn't oO, maybe just good numbers, or because this is only for max, maybe it wouldn't work for min, because the maximum is > 0 ... In the end it isn't faster than atomicCAS and it doesn't even use floatAsOrderdInt yet, which would make use of bitshift, subtraction and logical or, thereby decreasing performance even more: http://stereopsis.com/radix.html
template<>
__device__ inline void atomicFunc<float,MaxFunctor<float>>
(
float * const rdpTarget,
const float rValue,
MaxFunctor<float> f
)
{
atomicMax( (int*)rdpTarget, __float_as_int(rValue) );
}*/
template<class T_PREC, class T_FUNC>
__global__ void kernelVectorReduceShared
(
const T_PREC * const rdpData,
const unsigned rnData,
T_PREC * const rdpResult,
T_FUNC f,
const T_PREC rInitValue
)
{
assert( blockDim.y == 1 );
assert( blockDim.z == 1 );
assert( gridDim.y == 1 );
assert( gridDim.z == 1 );
const int32_t nTotalThreads = gridDim.x * blockDim.x;
int32_t i = blockIdx.x * blockDim.x + threadIdx.x;
assert( i < nTotalThreads );
T_PREC localReduced = T_PREC(rInitValue);
for ( ; i < rnData; i += nTotalThreads )
localReduced = f( localReduced, rdpData[i] );
__shared__ T_PREC smReduced;
/* master thread of every block shall set shared mem variable to 0 */
__syncthreads();
if ( threadIdx.x == 0 )
smReduced = T_PREC(rInitValue);
__syncthreads();
atomicFunc( &smReduced, localReduced, f );
__syncthreads();
if ( threadIdx.x == 0 )
atomicFunc( rdpResult, smReduced, f );
}
/**
* benchmarks suggest that this kernel is twice as fast as
* kernelVectorReduceShared
**/
template<class T_PREC, class T_FUNC>
__global__ void kernelVectorReduceSharedMemoryWarps
(
const T_PREC * const rdpData,
const unsigned rnData,
T_PREC * const rdpResult,
T_FUNC f,
const T_PREC rInitValue
)
{
assert( blockDim.y == 1 );
assert( blockDim.z == 1 );
assert( gridDim.y == 1 );
assert( gridDim.z == 1 );
const int32_t nTotalThreads = gridDim.x * blockDim.x;
int32_t i = blockIdx.x * blockDim.x + threadIdx.x;
assert( i < nTotalThreads );
T_PREC localReduced = T_PREC(rInitValue);
for ( ; i < rnData; i += nTotalThreads )
localReduced = f( localReduced, rdpData[i] );
/**
* reduce per warp:
* With __shfl_down we can read the register values of other lanes in
* a warp. In the first iteration lane 0 will add to its value the
* value of lane 16, lane 1 from lane 17 and so on.
* In the next step lane 0 will add the result from lane 8.
* In the end lane 0 will have the reduced value.
* @see http://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/
**/
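/* Illustrative trace (editorial note, not from the original source): for a
* hypothetical 4-lane warp holding [a, b, c, d] and a sum functor,
* warpDelta = 2 leaves lane 0 with a+c and lane 1 with b+d,
* warpDelta = 1 then leaves lane 0 with (a+c)+(b+d);
* after log2(warpSize) steps lane 0 holds the reduction of the whole warp and
* the values left in the other lanes are simply ignored. */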
constexpr int warpSize = 32;
const int32_t laneId = threadIdx.x % warpSize;
for ( int32_t warpDelta = warpSize / 2; warpDelta > 0; warpDelta /= 2)
localReduced = f( localReduced, __shfl_down( localReduced, warpDelta ) );
__shared__ T_PREC smReduced;
/* master thread of every block shall set shared mem variable to 0 */
__syncthreads();
if ( threadIdx.x == 0 )
smReduced = T_PREC(rInitValue);
__syncthreads();
if ( laneId == 0 )
atomicFunc( &smReduced, localReduced, f );
__syncthreads();
if ( threadIdx.x == 0 )
atomicFunc( rdpResult, smReduced, f );
}
template<class T_PREC, class T_FUNC>
__global__ void kernelVectorReduceWarps
(
const T_PREC * const rdpData,
const unsigned rnData,
T_PREC * const rdpResult,
T_FUNC f,
const T_PREC rInitValue
)
{
assert( blockDim.y == 1 );
assert( blockDim.z == 1 );
assert( gridDim.y == 1 );
assert( gridDim.z == 1 );
const int32_t nTotalThreads = gridDim.x * blockDim.x;
int32_t i = blockIdx.x * blockDim.x + threadIdx.x;
assert( i < nTotalThreads );
T_PREC localReduced = T_PREC(rInitValue);
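/* grid-stride loop: thread i reduces elements i, i + nTotalThreads, i + 2*nTotalThreads, ...
* so a fixed-size grid can handle any rnData */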
for ( ; i < rnData; i += nTotalThreads )
localReduced = f( localReduced, rdpData[i] );
/* reduce per warp (warpSize == 32 assumed) */
const int32_t laneId = threadIdx.x % 32;
#pragma unroll
for ( int32_t warpDelta = 32 / 2; warpDelta > 0; warpDelta /= 2)
localReduced = f( localReduced, __shfl_down( localReduced, warpDelta ) );
if ( laneId == 0 )
atomicFunc( rdpResult, localReduced, f );
}
template<class T_PREC, class T_FUNC>
T_PREC cudaReduce
(
const T_PREC * const rdpData,
const unsigned rnElements,
T_FUNC f,
const T_PREC rInitValue,
cudaStream_t rStream
)
{
const unsigned nThreads = 128;
//const unsigned nBlocks = ceil( (float) rnElements / nThreads );
//printf( "nThreads = %i, nBlocks = %i\n", nThreads, nBlocks );
const unsigned nBlocks = 288;
/* 256*256 = 65536 concurrent threads should fill most modern graphics
* cards. E.g. a GTX 760 can only handle 12288 running concurrently,
* everything else will be run after some threads finished. The
* number of kernels is only 384, because of oversubscription with
* warps */
assert( nBlocks < 65536 );
T_PREC reducedValue;
T_PREC * dpReducedValue;
T_PREC initValue = rInitValue;
CUDA_ERROR( cudaMalloc( (void**) &dpReducedValue, sizeof(float) ) );
CUDA_ERROR( cudaMemcpyAsync( dpReducedValue, &initValue, sizeof(float), cudaMemcpyHostToDevice, rStream ) );
/* memcpy is on the same stream as kernel will be, so no synchronize needed! */
kernelVectorReduceWarps<<< nBlocks, nThreads, 0, rStream >>>
( rdpData, rnElements, dpReducedValue, f, rInitValue );
CUDA_ERROR( cudaStreamSynchronize( rStream ) );
CUDA_ERROR( cudaMemcpyAsync( &reducedValue, dpReducedValue, sizeof(float), cudaMemcpyDeviceToHost, rStream ) );
CUDA_ERROR( cudaStreamSynchronize( rStream) );
CUDA_ERROR( cudaFree( dpReducedValue ) );
return reducedValue;
}
template<class T_PREC, class T_FUNC>
T_PREC cudaReduceSharedMemory
(
const T_PREC * const rdpData,
const unsigned rnElements,
T_FUNC f,
const T_PREC rInitValue,
cudaStream_t rStream
)
{
/* the more threads we have the longer the reduction will be
* done inside shared memory instead of global memory */
const unsigned nThreads = 256;
const unsigned nBlocks = 256;
assert( nBlocks < 65536 );
T_PREC reducedValue;
T_PREC * dpReducedValue;
T_PREC initValue = rInitValue;
CUDA_ERROR( cudaMalloc( (void**) &dpReducedValue, sizeof(float) ) );
CUDA_ERROR( cudaMemcpyAsync( dpReducedValue, &initValue, sizeof(float), cudaMemcpyHostToDevice, rStream ) );
/* memcpy is on the same stream as kernel will be, so no synchronize needed! */
kernelVectorReduceShared<<< nBlocks, nThreads, 0, rStream >>>
( rdpData, rnElements, dpReducedValue, f, rInitValue );
CUDA_ERROR( cudaStreamSynchronize( rStream ) );
CUDA_ERROR( cudaMemcpyAsync( &reducedValue, dpReducedValue, sizeof(float), cudaMemcpyDeviceToHost, rStream ) );
CUDA_ERROR( cudaStreamSynchronize( rStream) );
CUDA_ERROR( cudaFree( dpReducedValue ) );
return reducedValue;
}
template<class T_PREC, class T_FUNC>
T_PREC cudaReduceSharedMemoryWarps
(
const T_PREC * const rdpData,
const unsigned rnElements,
T_FUNC f,
const T_PREC rInitValue,
cudaStream_t rStream
)
{
const unsigned nThreads = 256;
const unsigned nBlocks = 256;
assert( nBlocks < 65536 );
T_PREC reducedValue;
T_PREC * dpReducedValue;
T_PREC initValue = rInitValue;
CUDA_ERROR( cudaMalloc( (void**) &dpReducedValue, sizeof(float) ) );
CUDA_ERROR( cudaMemcpyAsync( dpReducedValue, &initValue, sizeof(float), cudaMemcpyHostToDevice, rStream ) );
/* memcpy is on the same stream as kernel will be, so no synchronize needed! */
kernelVectorReduceSharedMemoryWarps<<< nBlocks, nThreads, 0, rStream >>>
( rdpData, rnElements, dpReducedValue, f, rInitValue );
CUDA_ERROR( cudaStreamSynchronize( rStream ) );
CUDA_ERROR( cudaMemcpyAsync( &reducedValue, dpReducedValue, sizeof(float), cudaMemcpyDeviceToHost, rStream ) );
CUDA_ERROR( cudaStreamSynchronize( rStream) );
CUDA_ERROR( cudaFree( dpReducedValue ) );
return reducedValue;
}
template<class T_PREC>
T_PREC cudaVectorMin
(
const T_PREC * const rdpData,
const unsigned rnElements,
cudaStream_t rStream
)
{
MinFunctor<T_PREC> minFunctor;
return cudaReduce( rdpData, rnElements, minFunctor, std::numeric_limits<T_PREC>::max(), rStream );
}
template<class T_PREC>
T_PREC cudaVectorMax
(
const T_PREC * const rdpData,
const unsigned rnElements,
cudaStream_t rStream
)
{
MaxFunctor<T_PREC> maxFunctor;
return cudaReduce( rdpData, rnElements, maxFunctor, std::numeric_limits<T_PREC>::lowest(), rStream );
}
template<class T_PREC>
T_PREC cudaVectorSum
(
const T_PREC * const rdpData,
const unsigned rnElements,
cudaStream_t rStream
)
{
SumFunctor<T_PREC> sumFunctor;
return cudaReduce( rdpData, rnElements, sumFunctor, T_PREC(0), rStream );
}
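/* Editorial usage sketch (not part of the original file): how these host-side
* wrappers might be called. The names dpData, n and stream are placeholders
* introduced for the example only.
*
* float * dpData = ...; // device buffer with n elements, filled elsewhere
* cudaStream_t stream;
* cudaStreamCreate( &stream );
* float total = imresh::algorithms::cuda::cudaVectorSum( dpData, n, stream );
* float peak = imresh::algorithms::cuda::cudaVectorMax( dpData, n, stream );
* cudaStreamDestroy( stream );
*/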
/* These functions only persist for benchmarking purposes to show that
* the standard version is the fastest */
template<class T_PREC>
T_PREC cudaVectorMaxSharedMemory
(
const T_PREC * const rdpData,
const unsigned rnElements,
cudaStream_t rStream
)
{
MaxFunctor<T_PREC> maxFunctor;
return cudaReduceSharedMemory( rdpData, rnElements, maxFunctor, std::numeric_limits<T_PREC>::lowest(), rStream );
}
template<class T_PREC>
T_PREC cudaVectorMaxSharedMemoryWarps
(
const T_PREC * const rdpData,
const unsigned rnElements,
cudaStream_t rStream
)
{
MaxFunctor<T_PREC> maxFunctor;
return cudaReduceSharedMemoryWarps( rdpData, rnElements, maxFunctor, std::numeric_limits<T_PREC>::lowest(), rStream );
}
/**
* "For the input-output algorithms the error E_F is
* usually meaningless since the input g_k(X) is no longer
* an estimate of the object. Then the meaningful error
* is the object-domain error E_0 given by Eq. (15)."
* (Fienup82)
* Eq.15:
* @f[ E_{0k}^2 = \sum\limits_{x\in\gamma} |g_k'(x)|^2 @f]
* where \gamma is the domain at which the constraints are
* not met. So this is the sum over the domain which should
* be 0.
*
* Eq.16:
* @f[ E_{Fk}^2 = \sum\limits_{u} |G_k(u) - G_k'(u)|^2 / N^2
= \sum_x |g_k(x) - g_k'(x)|^2 @f]
**/
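/* Editorial note: in the kernel below rdpTotalError accumulates |g_k'(x)|^2 over
* exactly those pixels that violate the constraint (the domain \gamma of Eq. 15),
* while rdpnMaskedPixels counts those pixels; the host wrapper calculateHioError
* then reduces both to a single scalar error value. */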
template< class T_COMPLEX, class T_MASK_ELEMENT >
__global__ void cudaKernelCalculateHioError
(
const T_COMPLEX * const rdpgPrime,
const T_MASK_ELEMENT * const rdpIsMasked,
const unsigned rnData,
const bool rInvertMask,
float * const rdpTotalError,
float * const rdpnMaskedPixels
)
{
assert( blockDim.y == 1 );
assert( blockDim.z == 1 );
assert( gridDim.y == 1 );
assert( gridDim.z == 1 );
const int32_t nTotalThreads = gridDim.x * blockDim.x;
int32_t i = blockIdx.x * blockDim.x + threadIdx.x;
assert( i < nTotalThreads );
float localTotalError = 0;
float localnMaskedPixels = 0;
for ( ; i < rnData; i += nTotalThreads )
{
const auto & re = rdpgPrime[i].x;
const auto & im = rdpgPrime[i].y;
/* only add up norm where no object should be (rMask == 0) */
/* note: invert + masked -> unmasked <=> 1 ? 1 -> 0
* noinvert + masked -> masked <=> 0 ? 1 -> 1
* invert + unmasked -> masked <=> 1 ? 0 -> 1
* noinvert + unmasked -> unmasked <=> 0 ? 0 -> 0
* => ? is xor => no thread divergence
*/
assert( rdpIsMasked[i] == 0 or rdpIsMasked[i] == 1 );
const bool shouldBeZero = rInvertMask xor (bool) rdpIsMasked[i];
assert( rdpIsMasked[i] >= 0.0 and rdpIsMasked[i] <= 1.0 );
//float shouldBeZero = rInvertMask + ( 1-2*rInvertMask )*rdpIsMasked[i];
/*
float shouldBeZero = rdpIsMasked[i];
if ( rInvertMask )
shouldBeZero = 1 - shouldBeZero;
*/
localTotalError += shouldBeZero * ( re*re+im*im );
localnMaskedPixels += shouldBeZero;
}
/* reduce per warp (warpSize == 32 assumed) */
const int32_t laneId = threadIdx.x % 32;
#pragma unroll
for ( int32_t warpDelta = 32 / 2; warpDelta > 0; warpDelta /= 2 )
{
localTotalError += __shfl_down( localTotalError , warpDelta );
localnMaskedPixels += __shfl_down( localnMaskedPixels, warpDelta );
}
SumFunctor<float> sum;
if ( laneId == 0 )
{
atomicFunc( rdpTotalError , localTotalError , sum );
atomicFunc( rdpnMaskedPixels, localnMaskedPixels, sum );
}
}
template<class T_COMPLEX, class T_MASK_ELEMENT>
float calculateHioError
(
const T_COMPLEX * const & rdpData,
const T_MASK_ELEMENT * const & rdpIsMasked,
const unsigned & rnElements,
const bool & rInvertMask,
cudaStream_t rStream
)
{
const unsigned nThreads = 256;
//const unsigned nBlocks = ceil( (float) rnElements / nThreads );
const unsigned nBlocks = 256;
assert( nBlocks < 65536 );
float totalError, nMaskedPixels;
float * dpTotalError, * dpnMaskedPixels;
CUDA_ERROR( cudaMalloc( (void**) &dpTotalError , sizeof(float) ) );
CUDA_ERROR( cudaMalloc( (void**) &dpnMaskedPixels, sizeof(float) ) );
CUDA_ERROR( cudaMemsetAsync( dpTotalError , 0, sizeof(float), rStream ) );
CUDA_ERROR( cudaMemsetAsync( dpnMaskedPixels, 0, sizeof(float), rStream ) );
/* memset is on the same stream as kernel will be, so no synchronize needed! */
cudaKernelCalculateHioError<<< nBlocks, nThreads, 0, rStream >>>
( rdpData, rdpIsMasked, rnElements, rInvertMask, dpTotalError, dpnMaskedPixels );
CUDA_ERROR( cudaStreamSynchronize( rStream ) );
CUDA_ERROR( cudaMemcpyAsync( &totalError , dpTotalError , sizeof(float), cudaMemcpyDeviceToHost, rStream ) );
CUDA_ERROR( cudaMemcpyAsync( &nMaskedPixels, dpnMaskedPixels, sizeof(float), cudaMemcpyDeviceToHost, rStream ) );
CUDA_ERROR( cudaStreamSynchronize( rStream ) );
CUDA_ERROR( cudaFree( dpTotalError ) );
CUDA_ERROR( cudaFree( dpnMaskedPixels ) );
return sqrtf(totalError) / nMaskedPixels;
}
/* explicit instantiations */
template
float cudaVectorMin<float>
(
const float * const rdpData,
const unsigned rnElements,
cudaStream_t rStream
);
template
double cudaVectorMin<double>
(
const double * const rdpData,
const unsigned rnElements,
cudaStream_t rStream
);
template
float cudaVectorMax<float>
(
const float * const rdpData,
const unsigned rnElements,
cudaStream_t rStream
);
template
double cudaVectorMax<double>
(
const double * const rdpData,
const unsigned rnElements,
cudaStream_t rStream
);
template
float cudaVectorSum<float>
(
const float * const rdpData,
const unsigned rnElements,
cudaStream_t rStream
);
template
double cudaVectorSum<double>
(
const double * const rdpData,
const unsigned rnElements,
cudaStream_t rStream
);
template
__global__ void cudaKernelCalculateHioError
<cufftComplex, float>
(
const cufftComplex * const rdpgPrime,
const float * const rdpIsMasked,
const unsigned rnData,
const bool rInvertMask,
float * const rdpTotalError,
float * const rdpnMaskedPixels
);
template
float calculateHioError
<cufftComplex, float>
(
const cufftComplex * const & rdpData,
const float * const & rdpIsMasked,
const unsigned & rnElements,
const bool & rInvertMask,
cudaStream_t rStream
);
template
float cudaVectorMaxSharedMemory<float>
(
const float * const rdpData,
const unsigned rnElements,
cudaStream_t rStream
);
template
float cudaVectorMaxSharedMemoryWarps<float>
(
const float * const rdpData,
const unsigned rnElements,
cudaStream_t rStream
);
} // namespace cuda
} // namespace algorithms
} // namespace imresh
|
8a6c8a25413a1ade2cc8656220b768e448ef3fff.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <hip/hip_runtime.h>
#include <iostream>
#include <cmath>
using namespace std;
//////////////////////////////////////////////////// Useful Macros ////////////////////////////////////////////////////
#define MAX_PTS_PER_BIN 16 // Max capacity of each bin
#define NUM_THREADS_PER_BLK 256 // Number of threads per block
#define BIN_SIZE 0.01 // Length of bin side
// Error checking wrapper macro
#define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Indexing bin to retrieve particle
#define IDX(bin_idx, pt_idx) ((bin_idx) * MAX_PTS_PER_BIN + (pt_idx))
/////////////////////////////////////////////////// Global Variables ///////////////////////////////////////////////////
int Num_Bins_Per_Side; // Number of bins per side
int Total_Num_Bins; // Total number of bins
int Num_Blocks_By_Pt; // Number of blocks (for particle iteration)
int Num_Blocks_By_Bin; // Number of blocks (for bin iteration)
int* Bins; // Bins containing particle indices
int* Bin_Sizes; // Actual size of each bin
/////////////////////////////////////////////////// Helper Functions ///////////////////////////////////////////////////
// Apply force on the particle based on neighbor's position
__device__ void apply_force(particle_t* particle, particle_t* neighbor) {
// Calculate Distance
double dx = neighbor->x - particle->x;
double dy = neighbor->y - particle->y;
double r2 = dx * dx + dy * dy;
// Check if the two particles should interact
if (r2 > cutoff * cutoff)
return;
r2 = fmax(r2, min_r * min_r);
double r = sqrt(r2);
// Very simple short-range repulsive force
double coef = (1 - cutoff / r) / r2 / mass;
particle->ax += coef * dx;
particle->ay += coef * dy;
}
// For a particle, make it interact with all particles in a neighboring bin.
__device__ void interact_with_neighbor(particle_t* parts, int self_pt_idx,
int* Bins, int* Bin_Sizes, int Num_Bins_Per_Side,
int nei_bin_row, int nei_bin_col) {
// Check if the neighbor is valid (within bound)
if (nei_bin_row < 0 || nei_bin_row >= Num_Bins_Per_Side ||
nei_bin_col < 0 || nei_bin_col >= Num_Bins_Per_Side)
return;
// Interact with all particles in the neighbor bin
int bin_idx = nei_bin_row * Num_Bins_Per_Side + nei_bin_col;
int num_nei_pts = Bin_Sizes[bin_idx];
for (int i = 0; i < num_nei_pts; ++i) {
int nei_pt_idx = Bins[IDX(bin_idx, i)];
apply_force(&parts[self_pt_idx], &parts[nei_pt_idx]);
}
}
// Assert-style handler
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/////////////////////////////////////////////////////// Kernels ///////////////////////////////////////////////////////
// Set all bin sizes to 0
__global__ void reset_bin_sizes(int* Bin_Sizes, int Total_Num_Bins) {
// Calculate thread/bin index
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= Total_Num_Bins)
return;
Bin_Sizes[idx] = 0; // "Clear" bin
}
// Associate each particle with its corresponding bin
__global__ void rebinning(particle_t* parts, int num_parts,
int Num_Bins_Per_Side, int* Bins, int* Bin_Sizes) {
// Calculate thread/particle index
int pt_idx = threadIdx.x + blockIdx.x * blockDim.x;
if (pt_idx >= num_parts)
return;
// // Exhaust all acceleration to prepare for force calculation
// parts[pt_idx].ax = parts[pt_idx].ay = 0;
// Determine which bin to put the particle
particle_t& pt = parts[pt_idx];
const int bin_row = floor(pt.x / BIN_SIZE);
const int bin_col = floor(pt.y / BIN_SIZE);
const int bin_idx = bin_row * Num_Bins_Per_Side + bin_col;
// Increment bin size atomically
int old_bin_size = atomicAdd(&Bin_Sizes[bin_idx], 1);
// Store particle index in bin
Bins[IDX(bin_idx, old_bin_size)] = pt_idx;
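// Note (editorial): old_bin_size is not checked against MAX_PTS_PER_BIN, so a bin
// receiving more than MAX_PTS_PER_BIN particles would write past its slot in Bins.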
}
// Calculate forces bin-by-bin
__global__ void compute_forces_gpu(particle_t* parts, int* Bins, int* Bin_Sizes, int Num_Bins_Per_Side) {
// Get thread/bin index
int bin_idx = threadIdx.x + blockIdx.x * blockDim.x;
if (bin_idx >= Num_Bins_Per_Side * Num_Bins_Per_Side)
return;
// Calculate row index & column index of this bin
int row = bin_idx / Num_Bins_Per_Side;
int col = bin_idx % Num_Bins_Per_Side;
// For each particle in this bin
int my_pts_cnt = Bin_Sizes[bin_idx];
for (int i = 0; i < my_pts_cnt; ++i) {
int pt_idx = Bins[IDX(bin_idx, i)];
// Exhaust all acceleration of this particle
parts[pt_idx].ax = parts[pt_idx].ay = 0;
// Interact with all 9 valid neighbor bins
interact_with_neighbor(parts, pt_idx, Bins, Bin_Sizes, Num_Bins_Per_Side, row , col ); // Self
interact_with_neighbor(parts, pt_idx, Bins, Bin_Sizes, Num_Bins_Per_Side, row - 1, col ); // Top
interact_with_neighbor(parts, pt_idx, Bins, Bin_Sizes, Num_Bins_Per_Side, row + 1, col ); // Bottom
interact_with_neighbor(parts, pt_idx, Bins, Bin_Sizes, Num_Bins_Per_Side, row , col - 1); // Left
interact_with_neighbor(parts, pt_idx, Bins, Bin_Sizes, Num_Bins_Per_Side, row , col + 1); // Right
interact_with_neighbor(parts, pt_idx, Bins, Bin_Sizes, Num_Bins_Per_Side, row - 1, col - 1); // Top left
interact_with_neighbor(parts, pt_idx, Bins, Bin_Sizes, Num_Bins_Per_Side, row - 1, col + 1); // Top right
interact_with_neighbor(parts, pt_idx, Bins, Bin_Sizes, Num_Bins_Per_Side, row + 1, col - 1); // Bottom left
interact_with_neighbor(parts, pt_idx, Bins, Bin_Sizes, Num_Bins_Per_Side, row + 1, col + 1); // Bottom right
}
}
// Move each particle
__global__ void move_gpu(particle_t* particles, int num_parts, double size) {
// Get thread (particle) ID
int pt_idx = threadIdx.x + blockIdx.x * blockDim.x;
if (pt_idx >= num_parts)
return;
// Get particle reference
particle_t& p = particles[pt_idx];
// Slightly simplified Velocity Verlet integration
// Conserves energy better than explicit Euler method
p.vx += p.ax * dt;
p.vy += p.ay * dt;
p.x += p.vx * dt;
p.y += p.vy * dt;
// Bounce from walls
while (p.x < 0 || p.x > size) {
p.x = p.x < 0 ? -p.x : 2 * size - p.x;
p.vx = -p.vx;
}
while (p.y < 0 || p.y > size) {
p.y = p.y < 0 ? -p.y : 2 * size - p.y;
p.vy = -p.vy;
}
}
//////////////////////////////////////////////////// Key Functions ////////////////////////////////////////////////////
void init_simulation(particle_t* parts, int num_parts, double size) {
// Calculate number of bins
Num_Bins_Per_Side = ceil(size / BIN_SIZE);
Total_Num_Bins = Num_Bins_Per_Side * Num_Bins_Per_Side;
// Calculate number of blocks by particle and by bin (ceiling division)
Num_Blocks_By_Pt = (num_parts + NUM_THREADS_PER_BLK - 1) / NUM_THREADS_PER_BLK;
Num_Blocks_By_Bin = (Total_Num_Bins + NUM_THREADS_PER_BLK - 1) / NUM_THREADS_PER_BLK;
// Allocate memory to bins
gpuErrorCheck( hipMalloc(&Bins, Total_Num_Bins * MAX_PTS_PER_BIN * sizeof(int)) );
gpuErrorCheck( hipMalloc(&Bin_Sizes, Total_Num_Bins * sizeof(int)) );
}
void simulate_one_step(particle_t* parts, int num_parts, double size) {
// Clearing bins (each thread handles a bin)
hipLaunchKernelGGL(( reset_bin_sizes), dim3(Num_Blocks_By_Bin), dim3(NUM_THREADS_PER_BLK), 0, 0, Bin_Sizes, Total_Num_Bins);
gpuErrorCheck( hipPeekAtLastError() );
gpuErrorCheck( hipDeviceSynchronize() );
// Assigning particles to bins (each thread handles a particle)
hipLaunchKernelGGL(( rebinning), dim3(Num_Blocks_By_Pt), dim3(NUM_THREADS_PER_BLK), 0, 0, parts, num_parts,
Num_Bins_Per_Side, Bins, Bin_Sizes);
gpuErrorCheck( hipPeekAtLastError() );
gpuErrorCheck( hipDeviceSynchronize() );
// Compute interaction forces (each thread handles a bin)
hipLaunchKernelGGL(( compute_forces_gpu), dim3(Num_Blocks_By_Bin), dim3(NUM_THREADS_PER_BLK), 0, 0, parts, Bins, Bin_Sizes, Num_Bins_Per_Side);
gpuErrorCheck( hipPeekAtLastError() );
gpuErrorCheck( hipDeviceSynchronize() );
// Move particles (each thread handles a particle)
hipLaunchKernelGGL(( move_gpu), dim3(Num_Blocks_By_Pt), dim3(NUM_THREADS_PER_BLK), 0, 0, parts, num_parts, size);
gpuErrorCheck( hipPeekAtLastError() );
gpuErrorCheck( hipDeviceSynchronize() );
} | 8a6c8a25413a1ade2cc8656220b768e448ef3fff.cu | #include "common.h"
#include <cuda.h>
#include <iostream>
#include <cmath>
using namespace std;
//////////////////////////////////////////////////// Useful Macros ////////////////////////////////////////////////////
#define MAX_PTS_PER_BIN 16 // Max capacity of each bin
#define NUM_THREADS_PER_BLK 256 // Number of threads per block
#define BIN_SIZE 0.01 // Length of bin side
// Error checking wrapper macro
#define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Indexing bin to retrieve particle
#define IDX(bin_idx, pt_idx) ((bin_idx) * MAX_PTS_PER_BIN + (pt_idx))
/////////////////////////////////////////////////// Global Variables ///////////////////////////////////////////////////
int Num_Bins_Per_Side; // Number of bins per side
int Total_Num_Bins; // Total number of bins
int Num_Blocks_By_Pt; // Number of blocks (for particle iteration)
int Num_Blocks_By_Bin; // Number of blocks (for bin iteration)
int* Bins; // Bins containing particle indices
int* Bin_Sizes; // Actual size of each bin
/////////////////////////////////////////////////// Helper Functions ///////////////////////////////////////////////////
// Apply force on the particle based on neighbor's position
__device__ void apply_force(particle_t* particle, particle_t* neighbor) {
// Calculate Distance
double dx = neighbor->x - particle->x;
double dy = neighbor->y - particle->y;
double r2 = dx * dx + dy * dy;
// Check if the two particles should interact
if (r2 > cutoff * cutoff)
return;
r2 = fmax(r2, min_r * min_r);
double r = sqrt(r2);
// Very simple short-range repulsive force
double coef = (1 - cutoff / r) / r2 / mass;
particle->ax += coef * dx;
particle->ay += coef * dy;
}
// For a particle, make it interact with all particles in a neighboring bin.
__device__ void interact_with_neighbor(particle_t* parts, int self_pt_idx,
int* Bins, int* Bin_Sizes, int Num_Bins_Per_Side,
int nei_bin_row, int nei_bin_col) {
// Check if the neighbor is valid (within bound)
if (nei_bin_row < 0 || nei_bin_row >= Num_Bins_Per_Side ||
nei_bin_col < 0 || nei_bin_col >= Num_Bins_Per_Side)
return;
// Interact with all particles in the neighbor bin
int bin_idx = nei_bin_row * Num_Bins_Per_Side + nei_bin_col;
int num_nei_pts = Bin_Sizes[bin_idx];
for (int i = 0; i < num_nei_pts; ++i) {
int nei_pt_idx = Bins[IDX(bin_idx, i)];
apply_force(&parts[self_pt_idx], &parts[nei_pt_idx]);
}
}
// Assert-style handler
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/////////////////////////////////////////////////////// Kernels ///////////////////////////////////////////////////////
// Set all bin sizes to 0
__global__ void reset_bin_sizes(int* Bin_Sizes, int Total_Num_Bins) {
// Calculate thread/bin index
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= Total_Num_Bins)
return;
Bin_Sizes[idx] = 0; // "Clear" bin
}
// Associate each particle with its corresponding bin
__global__ void rebinning(particle_t* parts, int num_parts,
int Num_Bins_Per_Side, int* Bins, int* Bin_Sizes) {
// Calculate thread/particle index
int pt_idx = threadIdx.x + blockIdx.x * blockDim.x;
if (pt_idx >= num_parts)
return;
// // Exhaust all acceleration to prepare for force calculation
// parts[pt_idx].ax = parts[pt_idx].ay = 0;
// Determine which bin to put the particle
particle_t& pt = parts[pt_idx];
const int bin_row = floor(pt.x / BIN_SIZE);
const int bin_col = floor(pt.y / BIN_SIZE);
const int bin_idx = bin_row * Num_Bins_Per_Side + bin_col;
// Increment bin size atomically
int old_bin_size = atomicAdd(&Bin_Sizes[bin_idx], 1);
// Store particle index in bin
Bins[IDX(bin_idx, old_bin_size)] = pt_idx;
}
// Calculate forces bin-by-bin
__global__ void compute_forces_gpu(particle_t* parts, int* Bins, int* Bin_Sizes, int Num_Bins_Per_Side) {
// Get thread/bin index
int bin_idx = threadIdx.x + blockIdx.x * blockDim.x;
if (bin_idx >= Num_Bins_Per_Side * Num_Bins_Per_Side)
return;
// Calculate row index & column index of this bin
int row = bin_idx / Num_Bins_Per_Side;
int col = bin_idx % Num_Bins_Per_Side;
// For each particle in this bin
int my_pts_cnt = Bin_Sizes[bin_idx];
for (int i = 0; i < my_pts_cnt; ++i) {
int pt_idx = Bins[IDX(bin_idx, i)];
// Exhaust all acceleration of this particle
parts[pt_idx].ax = parts[pt_idx].ay = 0;
// Interact with all 9 valid neighbor bins
interact_with_neighbor(parts, pt_idx, Bins, Bin_Sizes, Num_Bins_Per_Side, row , col ); // Self
interact_with_neighbor(parts, pt_idx, Bins, Bin_Sizes, Num_Bins_Per_Side, row - 1, col ); // Top
interact_with_neighbor(parts, pt_idx, Bins, Bin_Sizes, Num_Bins_Per_Side, row + 1, col ); // Bottom
interact_with_neighbor(parts, pt_idx, Bins, Bin_Sizes, Num_Bins_Per_Side, row , col - 1); // Left
interact_with_neighbor(parts, pt_idx, Bins, Bin_Sizes, Num_Bins_Per_Side, row , col + 1); // Right
interact_with_neighbor(parts, pt_idx, Bins, Bin_Sizes, Num_Bins_Per_Side, row - 1, col - 1); // Top left
interact_with_neighbor(parts, pt_idx, Bins, Bin_Sizes, Num_Bins_Per_Side, row - 1, col + 1); // Top right
interact_with_neighbor(parts, pt_idx, Bins, Bin_Sizes, Num_Bins_Per_Side, row + 1, col - 1); // Bottom left
interact_with_neighbor(parts, pt_idx, Bins, Bin_Sizes, Num_Bins_Per_Side, row + 1, col + 1); // Bottom right
}
}
// Move each particle
__global__ void move_gpu(particle_t* particles, int num_parts, double size) {
// Get thread (particle) ID
int pt_idx = threadIdx.x + blockIdx.x * blockDim.x;
if (pt_idx >= num_parts)
return;
// Get particle reference
particle_t& p = particles[pt_idx];
// Slightly simplified Velocity Verlet integration
// Conserves energy better than explicit Euler method
p.vx += p.ax * dt;
p.vy += p.ay * dt;
p.x += p.vx * dt;
p.y += p.vy * dt;
// Bounce from walls
while (p.x < 0 || p.x > size) {
p.x = p.x < 0 ? -p.x : 2 * size - p.x;
p.vx = -p.vx;
}
while (p.y < 0 || p.y > size) {
p.y = p.y < 0 ? -p.y : 2 * size - p.y;
p.vy = -p.vy;
}
}
//////////////////////////////////////////////////// Key Functions ////////////////////////////////////////////////////
void init_simulation(particle_t* parts, int num_parts, double size) {
// Calculate number of bins
Num_Bins_Per_Side = ceil(size / BIN_SIZE);
Total_Num_Bins = Num_Bins_Per_Side * Num_Bins_Per_Side;
// Calculate number of blocks by particle and by bin (ceiling division)
Num_Blocks_By_Pt = (num_parts + NUM_THREADS_PER_BLK - 1) / NUM_THREADS_PER_BLK;
Num_Blocks_By_Bin = (Total_Num_Bins + NUM_THREADS_PER_BLK - 1) / NUM_THREADS_PER_BLK;
// Allocate memory to bins
gpuErrorCheck( cudaMalloc(&Bins, Total_Num_Bins * MAX_PTS_PER_BIN * sizeof(int)) );
gpuErrorCheck( cudaMalloc(&Bin_Sizes, Total_Num_Bins * sizeof(int)) );
}
void simulate_one_step(particle_t* parts, int num_parts, double size) {
// Clearing bins (each thread handles a bin)
reset_bin_sizes<<<Num_Blocks_By_Bin, NUM_THREADS_PER_BLK>>>(Bin_Sizes, Total_Num_Bins);
gpuErrorCheck( cudaPeekAtLastError() );
gpuErrorCheck( cudaDeviceSynchronize() );
// Assigning particles to bins (each thread handles a particle)
rebinning<<<Num_Blocks_By_Pt, NUM_THREADS_PER_BLK>>>(parts, num_parts,
Num_Bins_Per_Side, Bins, Bin_Sizes);
gpuErrorCheck( cudaPeekAtLastError() );
gpuErrorCheck( cudaDeviceSynchronize() );
// Compute interaction forces (each thread handles a bin)
compute_forces_gpu<<<Num_Blocks_By_Bin, NUM_THREADS_PER_BLK>>>(parts, Bins, Bin_Sizes, Num_Bins_Per_Side);
gpuErrorCheck( cudaPeekAtLastError() );
gpuErrorCheck( cudaDeviceSynchronize() );
// Move particles (each thread handles a particle)
move_gpu<<<Num_Blocks_By_Pt, NUM_THREADS_PER_BLK>>>(parts, num_parts, size);
gpuErrorCheck( cudaPeekAtLastError() );
gpuErrorCheck( cudaDeviceSynchronize() );
} |
6b1a18c8abd36a167a1e5e6fa70f391efda24b32.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <iostream>
#include <hip/hip_runtime.h>
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != hipSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << hipGetErrorString(err) << " " << func << std::endl;
exit(EXIT_FAILURE);
}
}
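// Per-block inclusive scan (Hillis-Steele) over shared memory using two
// ping-pong buffers; the last thread of each block also records the block
// total in d_intermediate[blockIdx.x].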
__global__ void Hillis_Steele_Scan_Kernel(float *d_out, float *d_intermediate, float *d_in) {
extern __shared__ float sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
int pout = 0, pin = 1;
sdata[tid] = d_in[myId];
__syncthreads();
for(unsigned int s = 1; s < blockDim.x; s <<= 1) {
pout = 1 - pout;
pin = 1 - pout;
if (tid >= s)
sdata[pout*blockDim.x + tid] = sdata[pin*blockDim.x + tid] + sdata[pin*blockDim.x + tid - s];
else
sdata[pout*blockDim.x + tid] = sdata[pin*blockDim.x + tid];
__syncthreads();
}
    if (tid == blockDim.x - 1) {
        d_intermediate[blockIdx.x] = sdata[pout*blockDim.x + blockDim.x - 1]; // per-block total
    }
    d_out[myId] = sdata[pout*blockDim.x + tid]; // results live in the buffer written last
}
__global__ void Blelloch_Scan_Kernel(float *d_out, float *d_intermediate, float *d_in) {
    // Work-efficient exclusive (Blelloch) scan of one block held in shared memory.
    extern __shared__ float sdata[];
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int tid = threadIdx.x;
    sdata[tid] = d_in[myId];
    __syncthreads();
    for (unsigned int s = 1; s < blockDim.x; s <<= 1) { // up-sweep (reduce)
        int i = (tid + 1) * 2 * s - 1;
        if (i < blockDim.x) sdata[i] += sdata[i - s];
        __syncthreads();
    }
    if (tid == 0) sdata[blockDim.x - 1] = 0.0f; // identity element seeds the down-sweep
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { // down-sweep
        int i = (tid + 1) * 2 * s - 1;
        if (i < blockDim.x) {
            float t = sdata[i - s];
            sdata[i - s] = sdata[i];
            sdata[i] += t;
        }
        __syncthreads();
    }
    d_out[myId] = sdata[tid];
}
__global__ void Sum_Kernel(float *d_out, float *d_intermediate, float *d_in) {
    // Add each block's scanned offset to its elements to combine the per-block scans.
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    d_out[myId] = d_in[myId] + d_intermediate[blockIdx.x];
}
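// Scan pipeline (assumes size is a multiple of the block size): (1) Hillis-Steele
// scan inside each block while collecting block totals, (2) exclusive scan of the
// block totals, (3) add each block's offset back to its elements.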
void scan(float *d_out, float *d_in, int size) {
float *d_intermediate;
const int maxThreadsPerBlock = 1024;
int threads = maxThreadsPerBlock;
int blocks = size/threads;
checkCudaErrors(hipMalloc(&d_intermediate, blocks * sizeof(float)));
hipLaunchKernelGGL(( Hillis_Steele_Scan_Kernel), dim3(blocks), dim3(threads), 2 * threads * sizeof(float), 0,
d_out, d_intermediate, d_in);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
threads = blocks;
blocks = 1;
hipLaunchKernelGGL(( Blelloch_Scan_Kernel), dim3(blocks), dim3(threads), threads * sizeof(float), 0,
d_intermediate, d_intermediate, d_intermediate);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
threads = maxThreadsPerBlock;
blocks = size/threads;
hipLaunchKernelGGL(( Sum_Kernel), dim3(blocks), dim3(threads), threads * sizeof(float), 0,
d_out, d_intermediate, d_out);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipFree(d_intermediate);
}
int main(int argc, char** argv) {
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
std::cerr << "error: no devices supporting CUDA.\n";
exit(EXIT_FAILURE);
}
int dev = 0;
hipSetDevice(dev);
hipDeviceProp_t devProps;
if (hipGetDeviceProperties(&devProps, dev) == 0) {
printf("Using device %d:\n", dev);
printf("%S; compute v%d.%d; clock: %dkHz\n",
devProps.name, (int)devProps.major,
(int)devProps.minor, (int)devProps.clockRate);
}
const int ARRAY_SIZE = 1 << 20;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = (float)random();
}
float *d_in, *d_out;
checkCudaErrors(hipMalloc(&d_in, ARRAY_BYTES));
checkCudaErrors(hipMalloc(&d_out, ARRAY_BYTES));
checkCudaErrors(hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice));
scan(d_out, d_in, ARRAY_SIZE);
return 0;
}
| 6b1a18c8abd36a167a1e5e6fa70f391efda24b32.cu | #include <cassert>
#include <iostream>
#include <cuda_runtime.h>
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != cudaSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
exit(EXIT_FAILURE);
}
}
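// Per-block inclusive scan (Hillis-Steele) over shared memory using two
// ping-pong buffers; the last thread of each block also records the block
// total in d_intermediate[blockIdx.x].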
__global__ void Hillis_Steele_Scan_Kernel(float *d_out, float *d_intermediate, float *d_in) {
extern __shared__ float sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
int pout = 0, pin = 1;
sdata[tid] = d_in[myId];
__syncthreads();
for(unsigned int s = 1; s < blockDim.x; s <<= 1) {
pout = 1 - pout;
pin = 1 - pout;
if (tid >= s)
sdata[pout*blockDim.x + tid] = sdata[pin*blockDim.x + tid] + sdata[pin*blockDim.x + tid - s];
else
sdata[pout*blockDim.x + tid] = sdata[pin*blockDim.x + tid];
__syncthreads();
}
    if (tid == blockDim.x - 1) {
        d_intermediate[blockIdx.x] = sdata[pout*blockDim.x + blockDim.x - 1]; // per-block total
    }
    d_out[myId] = sdata[pout*blockDim.x + tid]; // results live in the buffer written last
}
__global__ void Blelloch_Scan_Kernel(float *d_out, float *d_intermediate, float *d_in) {
    // Work-efficient exclusive (Blelloch) scan of one block held in shared memory.
    extern __shared__ float sdata[];
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int tid = threadIdx.x;
    sdata[tid] = d_in[myId];
    __syncthreads();
    for (unsigned int s = 1; s < blockDim.x; s <<= 1) { // up-sweep (reduce)
        int i = (tid + 1) * 2 * s - 1;
        if (i < blockDim.x) sdata[i] += sdata[i - s];
        __syncthreads();
    }
    if (tid == 0) sdata[blockDim.x - 1] = 0.0f; // identity element seeds the down-sweep
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { // down-sweep
        int i = (tid + 1) * 2 * s - 1;
        if (i < blockDim.x) {
            float t = sdata[i - s];
            sdata[i - s] = sdata[i];
            sdata[i] += t;
        }
        __syncthreads();
    }
    d_out[myId] = sdata[tid];
}
__global__ void Sum_Kernel(float *d_out, float *d_intermediate, float *d_in) {
    // Add each block's scanned offset to its elements to combine the per-block scans.
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    d_out[myId] = d_in[myId] + d_intermediate[blockIdx.x];
}
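// Scan pipeline (assumes size is a multiple of the block size): (1) Hillis-Steele
// scan inside each block while collecting block totals, (2) exclusive scan of the
// block totals, (3) add each block's offset back to its elements.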
void scan(float *d_out, float *d_in, int size) {
float *d_intermediate;
const int maxThreadsPerBlock = 1024;
int threads = maxThreadsPerBlock;
int blocks = size/threads;
checkCudaErrors(cudaMalloc(&d_intermediate, blocks * sizeof(float)));
Hillis_Steele_Scan_Kernel<<<blocks, threads, 2 * threads * sizeof(float)>>>
(d_out, d_intermediate, d_in);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
threads = blocks;
blocks = 1;
Blelloch_Scan_Kernel<<<blocks, threads, threads * sizeof(float)>>>
(d_intermediate, d_intermediate, d_intermediate);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
threads = maxThreadsPerBlock;
blocks = size/threads;
Sum_Kernel<<<blocks, threads, threads * sizeof(float)>>>
(d_out, d_intermediate, d_out);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
cudaFree(d_intermediate);
}
int main(int argc, char** argv) {
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
std::cerr << "error: no devices supporting CUDA.\n";
exit(EXIT_FAILURE);
}
int dev = 0;
cudaSetDevice(dev);
cudaDeviceProp devProps;
if (cudaGetDeviceProperties(&devProps, dev) == 0) {
printf("Using device %d:\n", dev);
printf("%S; compute v%d.%d; clock: %dkHz\n",
devProps.name, (int)devProps.major,
(int)devProps.minor, (int)devProps.clockRate);
}
const int ARRAY_SIZE = 1 << 20;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = (float)random();
}
float *d_in, *d_out;
checkCudaErrors(cudaMalloc(&d_in, ARRAY_BYTES));
checkCudaErrors(cudaMalloc(&d_out, ARRAY_BYTES));
checkCudaErrors(cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice));
scan(d_out, d_in, ARRAY_SIZE);
return 0;
}
|
e5563f85ab448932a45f0fa5f98024cefa04e307.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "type.h"
#define MB_BACKOFF 1000
void gpu_init()
{
hipSetDevice(0);
hipDeviceReset();
}
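// Largest n such that an n x n matrix of REAL fits on the device while
// holding back MB_BACKOFF megabytes (computed from total, not free, memory).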
int get_dims()
{
size_t mem_free, mem_total;
hipMemGetInfo(&mem_free, &mem_total);
double bytes = (double)(mem_total - (size_t)MB_BACKOFF*1024*1024);
return (int) sqrt(bytes/sizeof(REAL));
}
void gpu_to_host(void *x_cpu, void *x_gpu, size_t len)
{
hipMemcpy(x_cpu, x_gpu, len, hipMemcpyDeviceToHost);
}
void gpu_free(void *x)
{
if (x)
hipFree(x);
}
| e5563f85ab448932a45f0fa5f98024cefa04e307.cu | #include <cuda_runtime.h>
#include "type.h"
#define MB_BACKOFF 1000
void gpu_init()
{
cudaSetDevice(0);
cudaDeviceReset();
}
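// Largest n such that an n x n matrix of REAL fits on the device while
// holding back MB_BACKOFF megabytes (computed from total, not free, memory).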
int get_dims()
{
size_t mem_free, mem_total;
cudaMemGetInfo(&mem_free, &mem_total);
double bytes = (double)(mem_total - (size_t)MB_BACKOFF*1024*1024);
return (int) sqrt(bytes/sizeof(REAL));
}
void gpu_to_host(void *x_cpu, void *x_gpu, size_t len)
{
cudaMemcpy(x_cpu, x_gpu, len, cudaMemcpyDeviceToHost);
}
void gpu_free(void *x)
{
if (x)
cudaFree(x);
}
|
9d80a51225b5e42adcce53a8af3eb4d31bac148b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stereo.h"
__global__
void CloneKernel(const float *src, int width, int height, int stride, float *dst)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
dst[pos] = src[pos];
}
void Stereo::Clone(const float *src, int w, int h, int s, float *dst)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
CloneKernel << < blocks, threads >> > (src, w, h, s, dst);
}
// Set Value
__global__
void SetValueKernel(float *image, float value, int width, int height, int stride)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
image[pos] = value;
}
void Stereo::SetValue(float *image, float value, int w, int h, int s)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
SetValueKernel << < blocks, threads >> > (image, value, w, h, s);
}
| 9d80a51225b5e42adcce53a8af3eb4d31bac148b.cu | #include "stereo.h"
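// Clone: element-wise copy of a strided single-channel float image.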
__global__
void CloneKernel(const float *src, int width, int height, int stride, float *dst)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
dst[pos] = src[pos];
}
void Stereo::Clone(const float *src, int w, int h, int s, float *dst)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
CloneKernel << < blocks, threads >> > (src, w, h, s, dst);
}
// Set Value
__global__
void SetValueKernel(float *image, float value, int width, int height, int stride)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
image[pos] = value;
}
void Stereo::SetValue(float *image, float value, int w, int h, int s)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
SetValueKernel << < blocks, threads >> > (image, value, w, h, s);
}
|
785d408194da163b38f3b11aedf53e8db9377754.hip | // !!! This is a file automatically generated by hipify!!!
/*
* University of Illinois Open Source License
* Copyright 2010 Luthey-Schulten Group,
* All rights reserved.
*
* Developed by: Luthey-Schulten Group
* University of Illinois at Urbana-Champaign
* http://www.scs.uiuc.edu/~schulten
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the Software), to deal with
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to
* do so, subject to the following conditions:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimers in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the names of the Luthey-Schulten Group, University of Illinois at
* Urbana-Champaign, nor the names of its contributors may be used to endorse or
* promote products derived from this Software without specific prior written
* permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS WITH THE SOFTWARE.
*
* Author(s): Elijah Roberts
*/
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include "lptf/Profile.h"
#include "lm/Cuda.h"
#include "lm/Math.h"
#include "TimingConstants.h"
#include "lm/rdme/dev/xor_random_dev.cu"
#include "lm/rdme/dev/bit_packed_diffusion_1d_dev.cu"
// Allocate the profile space.
PROF_ALLOC;
#define X_SIZE 128
#define Y_SIZE 128
#define Z_SIZE 64
#define PARTICLE_COUNT 216720 // 1 mM
__global__ void x_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int xBlockMaskMod, const unsigned int yBlockShiftMult, const unsigned int latticeXSize, const unsigned int latticeXSizeShiftMult, const unsigned int latticeXYSizeShiftMult, const unsigned int blockXSize, const unsigned int blockXSizeShiftMult, const unsigned long long timestepHash, unsigned int* siteOverflowList);
__global__ void y_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int xBlockMaskMod, const unsigned int yBlockShiftMult, const unsigned int latticeXSize, const unsigned int latticeXSizeShiftMult, const unsigned int latticeYSize, const unsigned int latticeXYSizeShiftMult, const unsigned long long timestepHash, unsigned int* siteOverflowList);
__global__ void z_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int xBlockMaskMod, const unsigned int zBlockShiftDiv, const unsigned int latticeXSizeShiftMult, const unsigned int latticeXYSize, const unsigned int latticeXYSizeShiftMult, const unsigned int latticeZSize, const unsigned long long timestepHash, unsigned int* siteOverflowList);
void runTimestep(hipStream_t stream, void* inLattice, void* outLattice, void* siteOverflowList, uint64_t xseed, uint64_t yseed, uint64_t zseed) throw(lm::CUDAException);
int main(int argc, char **argv)
{
try
{
PROF_INIT;
PROF_BEGIN(PROF_MAIN_RUN);
// Allocate the cuda resources.
hipStream_t stream;
unsigned int* startLattice;
unsigned int* startLatticeCounts;
void* inLattice;
void* outLattice;
void* overflowList;
startLattice = new unsigned int[X_SIZE*Y_SIZE*Z_SIZE];
startLatticeCounts = new unsigned int[X_SIZE*Y_SIZE*Z_SIZE];
memset(startLattice, 0, X_SIZE*Y_SIZE*Z_SIZE*sizeof(unsigned int));
memset(startLatticeCounts, 0, X_SIZE*Y_SIZE*Z_SIZE*sizeof(unsigned int));
CUDA_EXCEPTION_CHECK(hipStreamCreate(&stream));
CUDA_EXCEPTION_CHECK(hipMalloc(&inLattice, X_SIZE*Y_SIZE*Z_SIZE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(hipMalloc(&outLattice, X_SIZE*Y_SIZE*Z_SIZE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(hipMalloc(&overflowList, LKCUDA_OVERFLOW_LIST_ENTRIES*sizeof(unsigned int)));
// Fill in some random particles.
srand(2010);
for (unsigned int i=0; i<PARTICLE_COUNT; i++)
{
unsigned int r = (unsigned int)((((double)rand())/((double)RAND_MAX))*((double)X_SIZE)*((double)Y_SIZE)*((double)Z_SIZE));
if (startLatticeCounts[r] < MPD_PARTICLE_COUNT)
{
startLattice[r] |= ((rand()%15)+1)<<(MPD_PARTICLE_SHIFT*startLatticeCounts[r]++);
}
else
{
printf("Warning: skipped adding particle to fully occupied site.\n");
}
}
// Start timing the kernels.
PROF_BEGIN(PROF_SUBMIT_KERNELS);
PROF_CUDA_START(stream);
// Launch the kernels.
int NUM_LAUNCHES=100;
for (int i=0; i<NUM_LAUNCHES; i++)
{
// Reset the memory.
CUDA_EXCEPTION_CHECK(hipMemcpy(inLattice, startLattice, X_SIZE*Y_SIZE*Z_SIZE*sizeof(unsigned int), hipMemcpyHostToDevice));
CUDA_EXCEPTION_CHECK(hipMemset(outLattice, 0, X_SIZE*Y_SIZE*Z_SIZE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(hipMemset(overflowList, 0, LKCUDA_OVERFLOW_LIST_ENTRIES*sizeof(unsigned int)));
// Run the timestep.
PROF_CUDA_BEGIN(PROF_TIMESTEP_RUNNING,stream);
runTimestep(stream, inLattice, outLattice, overflowList, 1, 2, 3);
PROF_CUDA_END(PROF_TIMESTEP_RUNNING,stream);
}
// Wait for all of the kernels to finish.
CUDA_EXCEPTION_CHECK(hipStreamSynchronize(stream));
// Record the timings.
PROF_CUDA_FINISH(stream);
CUDA_EXCEPTION_CHECK(hipFree(overflowList));
CUDA_EXCEPTION_CHECK(hipFree(outLattice));
CUDA_EXCEPTION_CHECK(hipFree(inLattice));
delete[] startLatticeCounts;
delete[] startLattice;
CUDA_EXCEPTION_CHECK(hipStreamDestroy(stream));
PROF_END(PROF_SUBMIT_KERNELS);
printf("Profile file saved as: %s\n",PROF_MAKE_STR(PROF_OUT_FILE));
PROF_END(PROF_MAIN_RUN);
PROF_WRITE;
return 0;
}
catch (lm::CUDAException& e)
{
std::cerr << "CUDA Exception during execution: " << e.what() << std::endl;
}
catch (std::exception& e)
{
std::cerr << "Exception during execution: " << e.what() << std::endl;
}
catch (...)
{
std::cerr << "Unknown Exception during execution." << std::endl;
}
PROF_END(PROF_MAIN_RUN);
PROF_WRITE;
return -1;
}
void runTimestep(hipStream_t stream, void* inLattice, void* outLattice, void* siteOverflowList, uint64_t xseed, uint64_t yseed, uint64_t zseed)
throw(lm::CUDAException)
{
// Calculate some properties of the lattice.
const unsigned int latticeXSize = X_SIZE;
const unsigned int latticeYSize = Y_SIZE;
const unsigned int latticeZSize = Z_SIZE;
const unsigned int latticeXYSize = latticeXSize*latticeYSize;
const unsigned int latticeXSizeShiftMult = log2(latticeXSize);
const unsigned int latticeYSizeShiftMult = log2(latticeYSize);
const unsigned int latticeZSizeShiftMult = log2(latticeZSize);
const unsigned int latticeXYSizeShiftMult = latticeXSizeShiftMult+latticeYSizeShiftMult;
// Execute the kernel for the x direction.
PROF_CUDA_BEGIN(PROF_X_DIFFUSION,stream);
unsigned int xBlockXSize = min(MPD_X_BLOCK_MAX_X_SIZE,latticeXSize);
unsigned int xGrid = latticeXSize/xBlockXSize;
unsigned int yGrid = latticeYSize;
unsigned int zGrid = latticeZSize;
dim3 grid(xGrid*yGrid, zGrid);
dim3 threads(xBlockXSize, 1, 1);
CUDA_EXCEPTION_EXECUTE((hipLaunchKernelGGL(x_kernel, grid, threads, 0, stream, (unsigned int*)inLattice, (unsigned int*)outLattice, xGrid-1, log2(xGrid), latticeXSize, latticeXSizeShiftMult, latticeXYSizeShiftMult, xBlockXSize, log2(xBlockXSize), xseed, (unsigned int*)siteOverflowList)));
PROF_CUDA_END(PROF_X_DIFFUSION,stream);
// Execute the kernel for the y direction.
PROF_CUDA_BEGIN(PROF_Y_DIFFUSION,stream);
xGrid = latticeXSize/MPD_Y_BLOCK_X_SIZE;
yGrid = latticeYSize/MPD_Y_BLOCK_Y_SIZE;
zGrid = latticeZSize;
grid.x = xGrid*yGrid;
grid.y = zGrid;
threads.x = MPD_Y_BLOCK_X_SIZE;
threads.y = MPD_Y_BLOCK_Y_SIZE;
threads.z = 1;
CUDA_EXCEPTION_EXECUTE((hipLaunchKernelGGL(y_kernel, grid, threads, 0, stream, (unsigned int*)outLattice, (unsigned int*)inLattice, xGrid-1, log2(xGrid), latticeXSize, latticeXSizeShiftMult, latticeYSize, latticeXYSizeShiftMult, yseed, (unsigned int*)siteOverflowList)));
PROF_CUDA_END(PROF_Y_DIFFUSION,stream);
// Execute the kernel for the z direction.
PROF_CUDA_BEGIN(PROF_Z_DIFFUSION,stream);
xGrid = latticeXSize/MPD_Z_BLOCK_X_SIZE;
yGrid = latticeYSize;
zGrid = latticeZSize/MPD_Z_BLOCK_Z_SIZE;
grid.x = xGrid*zGrid;
grid.y = yGrid;
threads.x = MPD_Z_BLOCK_X_SIZE;
threads.y = 1;
threads.z = MPD_Z_BLOCK_Z_SIZE;
CUDA_EXCEPTION_EXECUTE((hipLaunchKernelGGL(z_kernel, grid, threads, 0, stream, (unsigned int*)inLattice, (unsigned int*)outLattice, xGrid-1, log2(xGrid), latticeXSizeShiftMult, latticeXYSize, latticeXYSizeShiftMult, latticeZSize, zseed, (unsigned int*)siteOverflowList)));
PROF_CUDA_END(PROF_Z_DIFFUSION,stream);
}
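/**
 * Multiparticle diffusion performed by copying the lattice section to shared memory, making a choice for each lattice
 * site, storing the new lattice into shared memory, and then updating the global lattice.
 */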
__global__ void x_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int xBlockMaskMod, const unsigned int yBlockShiftMult, const unsigned int latticeXSize, const unsigned int latticeXSizeShiftMult, const unsigned int latticeXYSizeShiftMult, const unsigned int blockXSize, const unsigned int blockXSizeShiftMult, const unsigned long long timestepHash, unsigned int* siteOverflowList)
{
const unsigned int bx = blockIdx.x&xBlockMaskMod;
const unsigned int by = blockIdx.x>>yBlockShiftMult;
const unsigned int bz = blockIdx.y;
const unsigned int x = threadIdx.x;
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeXIndex = (bx<<blockXSizeShiftMult) + x;
unsigned int latticeIndex = (bz<<latticeXYSizeShiftMult) + (by<<latticeXSizeShiftMult) + latticeXIndex;
unsigned int latticeSegmentIndex = x+MPD_WINDOW_APRON_SIZE;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int latticeSegment[MPD_X_WINDOW_SIZE];
// Copy the x window from device memory into shared memory.
copyXWindowFromLattice(inLattice, latticeIndex, latticeXIndex, latticeXSize, latticeSegment, latticeSegmentIndex, blockXSize, timestepHash);
////////////////////////////////////////
// Make the choice for each particle. //
////////////////////////////////////////
__shared__ unsigned int choices[MPD_X_WINDOW_SIZE];
// Make the choices.
makeXDiffusionChoices(latticeIndex, latticeXIndex, latticeXSize, latticeSegment, latticeSegmentIndex, choices, blockXSize, timestepHash);
//////////////////////////////////////////////////////////
// Create version of the lattice at the next time step. //
//////////////////////////////////////////////////////////
// Propagate the choices to the new lattice segment.
performPropagation(outLattice, latticeIndex, latticeSegment, latticeSegmentIndex-1, latticeSegmentIndex, latticeSegmentIndex+1, choices, siteOverflowList);
}
/**
* Multiparticle diffusion performed by copying the lattice section to shared memory, making a choice for each lattice
* site, storing the new lattice into shared memory, and then updating the global lattice.
*/
__global__ void y_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int xBlockMaskMod, const unsigned int yBlockShiftMult, const unsigned int latticeXSize, const unsigned int latticeXSizeShiftMult, const unsigned int latticeYSize, const unsigned int latticeXYSizeShiftMult, const unsigned long long timestepHash, unsigned int* siteOverflowList)
{
const unsigned int bx = blockIdx.x&xBlockMaskMod;
const unsigned int by = blockIdx.x>>yBlockShiftMult;
const unsigned int bz = blockIdx.y;
const unsigned int x = threadIdx.x;
const unsigned int y = threadIdx.y;
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeYIndex = (by<<MPD_Y_BLOCK_Y_SIZE_SHIFT_MULT) + y;
unsigned int latticeIndex = (bz<<latticeXYSizeShiftMult) + (latticeYIndex<<latticeXSizeShiftMult) + (bx<<MPD_Y_BLOCK_X_SIZE_SHIFT_MULT) + x;
unsigned int latticeYSegmentIndex = y+MPD_WINDOW_APRON_SIZE;
unsigned int latticeSegmentIndex = (latticeYSegmentIndex<<MPD_Y_BLOCK_X_SIZE_SHIFT_MULT) + x;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int latticeSegment[MPD_Y_WINDOW_SIZE];
// Copy the y window from device memory into shared memory.
copyYWindowFromLattice(inLattice, latticeIndex, latticeYIndex, latticeXSize, latticeYSize, latticeSegment, latticeSegmentIndex, latticeYSegmentIndex, timestepHash);
////////////////////////////////////////
// Make the choice for each particle. //
////////////////////////////////////////
__shared__ unsigned int choices[MPD_Y_WINDOW_SIZE];
// Make the choices.
makeYDiffusionChoices(latticeIndex, latticeYIndex, latticeXSize, latticeYSize, latticeSegment, latticeSegmentIndex, latticeYSegmentIndex, choices, timestepHash);
//////////////////////////////////////////////////////////
// Create version of the lattice at the next time step. //
//////////////////////////////////////////////////////////
// Propagate the choices to the new lattice segment.
performPropagation(outLattice, latticeIndex, latticeSegment, latticeSegmentIndex-MPD_Y_BLOCK_X_SIZE, latticeSegmentIndex, latticeSegmentIndex+MPD_Y_BLOCK_X_SIZE, choices, siteOverflowList);
}
/**
* Multiparticle diffusion performed by copying the lattice section to shared memory, making a choice for each lattice
* site, storing the new lattice into shared memory, and then updating the global lattice.
*/
__global__ void z_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int xBlockMaskMod, const unsigned int zBlockShiftDiv, const unsigned int latticeXSizeShiftMult, const unsigned int latticeXYSize, const unsigned int latticeXYSizeShiftMult, const unsigned int latticeZSize, const unsigned long long timestepHash, unsigned int* siteOverflowList)
{
const unsigned int bx = blockIdx.x&xBlockMaskMod;
const unsigned int by = blockIdx.y;
const unsigned int bz = blockIdx.x>>zBlockShiftDiv;
const unsigned int x = threadIdx.x;
const unsigned int z = threadIdx.z;
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeZIndex = (bz<<MPD_Z_BLOCK_Z_SIZE_SHIFT_MULT) + z;
unsigned int latticeIndex = (latticeZIndex<<latticeXYSizeShiftMult) + (by<<latticeXSizeShiftMult) + (bx<<MPD_Z_BLOCK_X_SIZE_SHIFT_MULT) + x;
unsigned int latticeZSegmentIndex = z+MPD_WINDOW_APRON_SIZE;
unsigned int latticeSegmentIndex = (latticeZSegmentIndex<<MPD_Z_BLOCK_X_SIZE_SHIFT_MULT) + x;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int latticeSegment[MPD_Z_WINDOW_SIZE];
// Copy the z window from device memory into shared memory.
copyZWindowFromLattice(inLattice, latticeIndex, latticeZIndex, latticeXYSize, latticeZSize, latticeSegment, latticeSegmentIndex, latticeZSegmentIndex, timestepHash);
////////////////////////////////////////
// Make the choice for each particle. //
////////////////////////////////////////
__shared__ unsigned int choices[MPD_Z_WINDOW_SIZE];
// Make the choices.
makeZDiffusionChoices(latticeIndex, latticeZIndex, latticeXYSize, latticeZSize, latticeSegment, latticeSegmentIndex, latticeZSegmentIndex, choices, timestepHash);
//////////////////////////////////////////////////////////
// Create version of the lattice at the next time step. //
//////////////////////////////////////////////////////////
// Propagate the choices to the new lattice segment.
performPropagation(outLattice, latticeIndex, latticeSegment, latticeSegmentIndex-MPD_Z_BLOCK_X_SIZE, latticeSegmentIndex, latticeSegmentIndex+MPD_Z_BLOCK_X_SIZE, choices, siteOverflowList);
}
| 785d408194da163b38f3b11aedf53e8db9377754.cu | /*
* University of Illinois Open Source License
* Copyright 2010 Luthey-Schulten Group,
* All rights reserved.
*
* Developed by: Luthey-Schulten Group
* University of Illinois at Urbana-Champaign
* http://www.scs.uiuc.edu/~schulten
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the Software), to deal with
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to
* do so, subject to the following conditions:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimers in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the names of the Luthey-Schulten Group, University of Illinois at
* Urbana-Champaign, nor the names of its contributors may be used to endorse or
* promote products derived from this Software without specific prior written
* permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS WITH THE SOFTWARE.
*
* Author(s): Elijah Roberts
*/
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <stdint.h>
#include <cuda.h>
#include "lptf/Profile.h"
#include "lm/Cuda.h"
#include "lm/Math.h"
#include "TimingConstants.h"
#include "lm/rdme/dev/xor_random_dev.cu"
#include "lm/rdme/dev/bit_packed_diffusion_1d_dev.cu"
// Allocate the profile space.
PROF_ALLOC;
#define X_SIZE 128
#define Y_SIZE 128
#define Z_SIZE 64
#define PARTICLE_COUNT 216720 // 1 mM
__global__ void x_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int xBlockMaskMod, const unsigned int yBlockShiftMult, const unsigned int latticeXSize, const unsigned int latticeXSizeShiftMult, const unsigned int latticeXYSizeShiftMult, const unsigned int blockXSize, const unsigned int blockXSizeShiftMult, const unsigned long long timestepHash, unsigned int* siteOverflowList);
__global__ void y_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int xBlockMaskMod, const unsigned int yBlockShiftMult, const unsigned int latticeXSize, const unsigned int latticeXSizeShiftMult, const unsigned int latticeYSize, const unsigned int latticeXYSizeShiftMult, const unsigned long long timestepHash, unsigned int* siteOverflowList);
__global__ void z_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int xBlockMaskMod, const unsigned int zBlockShiftDiv, const unsigned int latticeXSizeShiftMult, const unsigned int latticeXYSize, const unsigned int latticeXYSizeShiftMult, const unsigned int latticeZSize, const unsigned long long timestepHash, unsigned int* siteOverflowList);
void runTimestep(cudaStream_t stream, void* inLattice, void* outLattice, void* siteOverflowList, uint64_t xseed, uint64_t yseed, uint64_t zseed) throw(lm::CUDAException);
int main(int argc, char **argv)
{
try
{
PROF_INIT;
PROF_BEGIN(PROF_MAIN_RUN);
// Allocate the cuda resources.
cudaStream_t stream;
unsigned int* startLattice;
unsigned int* startLatticeCounts;
void* inLattice;
void* outLattice;
void* overflowList;
startLattice = new unsigned int[X_SIZE*Y_SIZE*Z_SIZE];
startLatticeCounts = new unsigned int[X_SIZE*Y_SIZE*Z_SIZE];
memset(startLattice, 0, X_SIZE*Y_SIZE*Z_SIZE*sizeof(unsigned int));
memset(startLatticeCounts, 0, X_SIZE*Y_SIZE*Z_SIZE*sizeof(unsigned int));
CUDA_EXCEPTION_CHECK(cudaStreamCreate(&stream));
CUDA_EXCEPTION_CHECK(cudaMalloc(&inLattice, X_SIZE*Y_SIZE*Z_SIZE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(cudaMalloc(&outLattice, X_SIZE*Y_SIZE*Z_SIZE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(cudaMalloc(&overflowList, LKCUDA_OVERFLOW_LIST_ENTRIES*sizeof(unsigned int)));
// Fill in some random particles.
srand(2010);
for (unsigned int i=0; i<PARTICLE_COUNT; i++)
{
unsigned int r = (unsigned int)((((double)rand())/((double)RAND_MAX))*((double)X_SIZE)*((double)Y_SIZE)*((double)Z_SIZE));
if (startLatticeCounts[r] < MPD_PARTICLE_COUNT)
{
startLattice[r] |= ((rand()%15)+1)<<(MPD_PARTICLE_SHIFT*startLatticeCounts[r]++);
}
else
{
printf("Warning: skipped adding particle to fully occupied site.\n");
}
}
// Start timing the kernels.
PROF_BEGIN(PROF_SUBMIT_KERNELS);
PROF_CUDA_START(stream);
// Launch the kernels.
int NUM_LAUNCHES=100;
for (int i=0; i<NUM_LAUNCHES; i++)
{
// Reset the memory.
CUDA_EXCEPTION_CHECK(cudaMemcpy(inLattice, startLattice, X_SIZE*Y_SIZE*Z_SIZE*sizeof(unsigned int), cudaMemcpyHostToDevice));
CUDA_EXCEPTION_CHECK(cudaMemset(outLattice, 0, X_SIZE*Y_SIZE*Z_SIZE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(cudaMemset(overflowList, 0, LKCUDA_OVERFLOW_LIST_ENTRIES*sizeof(unsigned int)));
// Run the timestep.
PROF_CUDA_BEGIN(PROF_TIMESTEP_RUNNING,stream);
runTimestep(stream, inLattice, outLattice, overflowList, 1, 2, 3);
PROF_CUDA_END(PROF_TIMESTEP_RUNNING,stream);
}
// Wait for all of the kernels to finish.
CUDA_EXCEPTION_CHECK(cudaStreamSynchronize(stream));
// Record the timings.
PROF_CUDA_FINISH(stream);
CUDA_EXCEPTION_CHECK(cudaFree(overflowList));
CUDA_EXCEPTION_CHECK(cudaFree(outLattice));
CUDA_EXCEPTION_CHECK(cudaFree(inLattice));
delete[] startLatticeCounts;
delete[] startLattice;
CUDA_EXCEPTION_CHECK(cudaStreamDestroy(stream));
PROF_END(PROF_SUBMIT_KERNELS);
printf("Profile file saved as: %s\n",PROF_MAKE_STR(PROF_OUT_FILE));
PROF_END(PROF_MAIN_RUN);
PROF_WRITE;
return 0;
}
catch (lm::CUDAException& e)
{
std::cerr << "CUDA Exception during execution: " << e.what() << std::endl;
}
catch (std::exception& e)
{
std::cerr << "Exception during execution: " << e.what() << std::endl;
}
catch (...)
{
std::cerr << "Unknown Exception during execution." << std::endl;
}
PROF_END(PROF_MAIN_RUN);
PROF_WRITE;
return -1;
}
void runTimestep(cudaStream_t stream, void* inLattice, void* outLattice, void* siteOverflowList, uint64_t xseed, uint64_t yseed, uint64_t zseed)
throw(lm::CUDAException)
{
// Calculate some properties of the lattice.
const unsigned int latticeXSize = X_SIZE;
const unsigned int latticeYSize = Y_SIZE;
const unsigned int latticeZSize = Z_SIZE;
const unsigned int latticeXYSize = latticeXSize*latticeYSize;
const unsigned int latticeXSizeShiftMult = log2(latticeXSize);
const unsigned int latticeYSizeShiftMult = log2(latticeYSize);
const unsigned int latticeZSizeShiftMult = log2(latticeZSize);
const unsigned int latticeXYSizeShiftMult = latticeXSizeShiftMult+latticeYSizeShiftMult;
// Execute the kernel for the x direction.
PROF_CUDA_BEGIN(PROF_X_DIFFUSION,stream);
unsigned int xBlockXSize = min(MPD_X_BLOCK_MAX_X_SIZE,latticeXSize);
unsigned int xGrid = latticeXSize/xBlockXSize;
unsigned int yGrid = latticeYSize;
unsigned int zGrid = latticeZSize;
dim3 grid(xGrid*yGrid, zGrid);
dim3 threads(xBlockXSize, 1, 1);
CUDA_EXCEPTION_EXECUTE((x_kernel<<<grid,threads,0,stream>>>((unsigned int*)inLattice, (unsigned int*)outLattice, xGrid-1, log2(xGrid), latticeXSize, latticeXSizeShiftMult, latticeXYSizeShiftMult, xBlockXSize, log2(xBlockXSize), xseed, (unsigned int*)siteOverflowList)));
PROF_CUDA_END(PROF_X_DIFFUSION,stream);
// Execute the kernel for the y direction.
PROF_CUDA_BEGIN(PROF_Y_DIFFUSION,stream);
xGrid = latticeXSize/MPD_Y_BLOCK_X_SIZE;
yGrid = latticeYSize/MPD_Y_BLOCK_Y_SIZE;
zGrid = latticeZSize;
grid.x = xGrid*yGrid;
grid.y = zGrid;
threads.x = MPD_Y_BLOCK_X_SIZE;
threads.y = MPD_Y_BLOCK_Y_SIZE;
threads.z = 1;
CUDA_EXCEPTION_EXECUTE((y_kernel<<<grid,threads,0,stream>>>((unsigned int*)outLattice, (unsigned int*)inLattice, xGrid-1, log2(xGrid), latticeXSize, latticeXSizeShiftMult, latticeYSize, latticeXYSizeShiftMult, yseed, (unsigned int*)siteOverflowList)));
PROF_CUDA_END(PROF_Y_DIFFUSION,stream);
// Execute the kernel for the z direction.
PROF_CUDA_BEGIN(PROF_Z_DIFFUSION,stream);
xGrid = latticeXSize/MPD_Z_BLOCK_X_SIZE;
yGrid = latticeYSize;
zGrid = latticeZSize/MPD_Z_BLOCK_Z_SIZE;
grid.x = xGrid*zGrid;
grid.y = yGrid;
threads.x = MPD_Z_BLOCK_X_SIZE;
threads.y = 1;
threads.z = MPD_Z_BLOCK_Z_SIZE;
CUDA_EXCEPTION_EXECUTE((z_kernel<<<grid,threads,0,stream>>>((unsigned int*)inLattice, (unsigned int*)outLattice, xGrid-1, log2(xGrid), latticeXSizeShiftMult, latticeXYSize, latticeXYSizeShiftMult, latticeZSize, zseed, (unsigned int*)siteOverflowList)));
PROF_CUDA_END(PROF_Z_DIFFUSION,stream);
}
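/**
 * Multiparticle diffusion performed by copying the lattice section to shared memory, making a choice for each lattice
 * site, storing the new lattice into shared memory, and then updating the global lattice.
 */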
__global__ void x_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int xBlockMaskMod, const unsigned int yBlockShiftMult, const unsigned int latticeXSize, const unsigned int latticeXSizeShiftMult, const unsigned int latticeXYSizeShiftMult, const unsigned int blockXSize, const unsigned int blockXSizeShiftMult, const unsigned long long timestepHash, unsigned int* siteOverflowList)
{
const unsigned int bx = blockIdx.x&xBlockMaskMod;
const unsigned int by = blockIdx.x>>yBlockShiftMult;
const unsigned int bz = blockIdx.y;
const unsigned int x = threadIdx.x;
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeXIndex = (bx<<blockXSizeShiftMult) + x;
unsigned int latticeIndex = (bz<<latticeXYSizeShiftMult) + (by<<latticeXSizeShiftMult) + latticeXIndex;
unsigned int latticeSegmentIndex = x+MPD_WINDOW_APRON_SIZE;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int latticeSegment[MPD_X_WINDOW_SIZE];
// Copy the x window from device memory into shared memory.
copyXWindowFromLattice(inLattice, latticeIndex, latticeXIndex, latticeXSize, latticeSegment, latticeSegmentIndex, blockXSize, timestepHash);
////////////////////////////////////////
// Make the choice for each particle. //
////////////////////////////////////////
__shared__ unsigned int choices[MPD_X_WINDOW_SIZE];
// Make the choices.
makeXDiffusionChoices(latticeIndex, latticeXIndex, latticeXSize, latticeSegment, latticeSegmentIndex, choices, blockXSize, timestepHash);
//////////////////////////////////////////////////////////
// Create version of the lattice at the next time step. //
//////////////////////////////////////////////////////////
// Propagate the choices to the new lattice segment.
performPropagation(outLattice, latticeIndex, latticeSegment, latticeSegmentIndex-1, latticeSegmentIndex, latticeSegmentIndex+1, choices, siteOverflowList);
}
/**
* Multiparticle diffusion performed by copying the lattice section to shared memory, making a choice for each lattice
* site, storing the new lattice into shared memory, and then updating the global lattice.
*/
__global__ void y_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int xBlockMaskMod, const unsigned int yBlockShiftMult, const unsigned int latticeXSize, const unsigned int latticeXSizeShiftMult, const unsigned int latticeYSize, const unsigned int latticeXYSizeShiftMult, const unsigned long long timestepHash, unsigned int* siteOverflowList)
{
const unsigned int bx = blockIdx.x&xBlockMaskMod;
const unsigned int by = blockIdx.x>>yBlockShiftMult;
const unsigned int bz = blockIdx.y;
const unsigned int x = threadIdx.x;
const unsigned int y = threadIdx.y;
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeYIndex = (by<<MPD_Y_BLOCK_Y_SIZE_SHIFT_MULT) + y;
unsigned int latticeIndex = (bz<<latticeXYSizeShiftMult) + (latticeYIndex<<latticeXSizeShiftMult) + (bx<<MPD_Y_BLOCK_X_SIZE_SHIFT_MULT) + x;
unsigned int latticeYSegmentIndex = y+MPD_WINDOW_APRON_SIZE;
unsigned int latticeSegmentIndex = (latticeYSegmentIndex<<MPD_Y_BLOCK_X_SIZE_SHIFT_MULT) + x;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int latticeSegment[MPD_Y_WINDOW_SIZE];
// Copy the y window from device memory into shared memory.
copyYWindowFromLattice(inLattice, latticeIndex, latticeYIndex, latticeXSize, latticeYSize, latticeSegment, latticeSegmentIndex, latticeYSegmentIndex, timestepHash);
////////////////////////////////////////
// Make the choice for each particle. //
////////////////////////////////////////
__shared__ unsigned int choices[MPD_Y_WINDOW_SIZE];
// Make the choices.
makeYDiffusionChoices(latticeIndex, latticeYIndex, latticeXSize, latticeYSize, latticeSegment, latticeSegmentIndex, latticeYSegmentIndex, choices, timestepHash);
//////////////////////////////////////////////////////////
// Create version of the lattice at the next time step. //
//////////////////////////////////////////////////////////
// Propagate the choices to the new lattice segment.
performPropagation(outLattice, latticeIndex, latticeSegment, latticeSegmentIndex-MPD_Y_BLOCK_X_SIZE, latticeSegmentIndex, latticeSegmentIndex+MPD_Y_BLOCK_X_SIZE, choices, siteOverflowList);
}
/**
* Multiparticle diffusion performed by copying the lattice section to shared memory, making a choice for each lattice
* site, storing the new lattice into shared memory, and then updating the global lattice.
*/
__global__ void z_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int xBlockMaskMod, const unsigned int zBlockShiftDiv, const unsigned int latticeXSizeShiftMult, const unsigned int latticeXYSize, const unsigned int latticeXYSizeShiftMult, const unsigned int latticeZSize, const unsigned long long timestepHash, unsigned int* siteOverflowList)
{
const unsigned int bx = blockIdx.x&xBlockMaskMod;
const unsigned int by = blockIdx.y;
const unsigned int bz = blockIdx.x>>zBlockShiftDiv;
const unsigned int x = threadIdx.x;
const unsigned int z = threadIdx.z;
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeZIndex = (bz<<MPD_Z_BLOCK_Z_SIZE_SHIFT_MULT) + z;
unsigned int latticeIndex = (latticeZIndex<<latticeXYSizeShiftMult) + (by<<latticeXSizeShiftMult) + (bx<<MPD_Z_BLOCK_X_SIZE_SHIFT_MULT) + x;
unsigned int latticeZSegmentIndex = z+MPD_WINDOW_APRON_SIZE;
unsigned int latticeSegmentIndex = (latticeZSegmentIndex<<MPD_Z_BLOCK_X_SIZE_SHIFT_MULT) + x;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int latticeSegment[MPD_Z_WINDOW_SIZE];
// Copy the z window from device memory into shared memory.
copyZWindowFromLattice(inLattice, latticeIndex, latticeZIndex, latticeXYSize, latticeZSize, latticeSegment, latticeSegmentIndex, latticeZSegmentIndex, timestepHash);
////////////////////////////////////////
// Make the choice for each particle. //
////////////////////////////////////////
__shared__ unsigned int choices[MPD_Z_WINDOW_SIZE];
// Make the choices.
makeZDiffusionChoices(latticeIndex, latticeZIndex, latticeXYSize, latticeZSize, latticeSegment, latticeSegmentIndex, latticeZSegmentIndex, choices, timestepHash);
//////////////////////////////////////////////////////////
// Create version of the lattice at the next time step. //
//////////////////////////////////////////////////////////
// Propagate the choices to the new lattice segment.
performPropagation(outLattice, latticeIndex, latticeSegment, latticeSegmentIndex-MPD_Z_BLOCK_X_SIZE, latticeSegmentIndex, latticeSegmentIndex+MPD_Z_BLOCK_X_SIZE, choices, siteOverflowList);
}
|
1d772c59ad5540eff9d07d7e2d5dae720bfca53b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define N 30
#define TAMANIO_MATRIZ_C 224
#define BLOCKSIZE_MEDIA 512
#define ABS(a) (((a) < 0) ? -(a) : (a))
////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
// Kernels SPCA //
////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
__global__ void NormalizeX(float* d_image, float *d_pixel, int num_bands, int num_lines, int num_samples, int iterations){
__shared__ float sdata[BLOCKSIZE_MEDIA];
__shared__ float smean[1];
__shared__ float svar[1];
int it, s;
unsigned int tid = threadIdx.x;
int element;
if(tid==0){
smean[0]=0;
svar[0]=0;
}
for (it=0; it<iterations; it++){
element=(num_lines*num_samples*blockIdx.x)+(blockDim.x*it);
if((it*blockDim.x)+tid<num_lines*num_samples){
sdata[tid]=d_image[element+tid];
}
else{
sdata[tid]=0;
}
__syncthreads();
for(s=blockDim.x/2; s>0; s=s/2){
if (tid < s){
sdata[tid]+=sdata[tid+s];
}
__syncthreads();
}
if(tid==0){
smean[0]+=sdata[0];
}
__syncthreads();
}
if(tid==0){
smean[0]/=(num_lines*num_samples);
}
__syncthreads();
for (it=0; it<iterations; it++){
element=(num_lines*num_samples*blockIdx.x)+(blockDim.x*it);
if((it*blockDim.x)+tid<num_lines*num_samples){
sdata[tid]=(d_image[element+tid]-smean[0])*(d_image[element+tid]-smean[0]);
}
else{
sdata[tid]=0;
}
__syncthreads();
for(s=blockDim.x/2; s>0; s=s/2){
if (tid < s){
sdata[tid]+=sdata[tid+s];
}
__syncthreads();
}
if(tid==0){
svar[0]+=sdata[0];
}
__syncthreads();
}
if(tid==0){
svar[0]/=((num_lines*num_samples)-1);
//d_pixel[blockIdx.x]=svar[0];
}
__syncthreads();
for (it=0; it<iterations; it++){
element=(num_lines*num_samples*blockIdx.x)+(blockDim.x*it);
if((it*blockDim.x)+tid<num_lines*num_samples){
d_image[element+tid]/=svar[0];
}
}
}
////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
// Kernels NFINDR //
////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
/*Este kernel calcula el volumen conseguido al agregar cada pixel al conjunto de endmembers.
Realiza la multiplicación de d_aux * d_HIM2x2, además calcula el valor absoluto de cada volumen.*/
/*This kernel compute the getting volume obtained by adding each pixel to the set of
endmembers. It makes the multiplication d_VVolume = d_aux * d_HIM2x2, also it gets
the absolute value of each volume. */
__global__ void VolumeCalculation(double *d_aux, double *d_HIM2x2, double *d_Vvolume,
double tmp2,int lines_samples, int n_end){
int idx = blockDim.x * blockIdx.x+threadIdx.x;
__shared__ double s_aux[N];
double a;
if (idx<lines_samples){
if(threadIdx.x<n_end){
s_aux[threadIdx.x]=d_aux[threadIdx.x];
}
__syncthreads();
a=0;
for(int i=0; i<n_end; i++){
a+=s_aux[i]*d_HIM2x2[i*lines_samples+idx];
}
a=a*tmp2;
d_Vvolume[idx]=ABS(a);
}
}
/*Este kernel obtiene los I volúmenes mayores calculados en el kernel anterior siendo I el número
de bloques con que se estructura el lanzamiento del kernel. Además obtiene los índices de los píxeles
que obtienen dichos volúmenes.*/
/*This kernel gets the I higher volumes obtained by VolumeCalculation kernel,
where I is the number of blocks that we configure the kernel launch. Also gets the index
of the pixels that get this volumes.*/
__global__ void Reduction_vol(double *d_Vvolume, double *d_volumenes, int *d_indices, int lines_samples){
__shared__ double s_v[512];
__shared__ int s_i[512];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
if((i+blockDim.x)>=lines_samples){
s_v[tid]=d_Vvolume[i];
s_i[tid]=i;
}
else{
if(d_Vvolume[i]>d_Vvolume[i + blockDim.x]){
s_v[tid]=d_Vvolume[i];
s_i[tid]=i;
}
else{
s_v[tid]=d_Vvolume[i + blockDim.x];
s_i[tid]=i+ blockDim.x;
}
}
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s>>=1){
if (tid < s){
if(s_v[tid]<=s_v[tid+s]){
s_v[tid]=s_v[tid+s];
s_i[tid]=s_i[tid+s];
}
}
__syncthreads();
}
d_volumenes[blockIdx.x]=s_v[0];
d_indices[blockIdx.x]=s_i[0];
__syncthreads();
}
////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
// Kernels Unmixing //
////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
/*Este kernel realiza la fase final del unmixing, es decir multiplicar cada pixel por la
matriz de cómputo obtenida a partir de la matriz de endmembers, y así obtener las abundancias.*/
/*This kernel multiplicates the compute_matrix by each pixel of the hyperspectral image,
thus obtain a set of abundance vectors , each contain the fractional abundances of the
endmembers in each pixel.*/
__global__ void Unmixing(float *d_imagen, float *d_imagen_unmixing,float *d_matriz_computo, int num_lines, int num_samples, int num_bands, int N_END)
{
int pixel = blockDim.x * blockIdx.x+threadIdx.x;
__shared__ float matriz_c[TAMANIO_MATRIZ_C];
float l_pixel[TAMANIO_MATRIZ_C];
float a;
if(pixel<num_lines*num_samples){
for(int t=0; t<num_bands; t++){
l_pixel[t]=d_imagen[pixel+(num_lines*num_samples*t)];
}
for(int it=0; it<N_END; it++){
if(threadIdx.x==0){
for(int i=0; i<num_bands; i++){
matriz_c[i]=d_matriz_computo[it*num_bands+i];
}
}
__syncthreads();
a=0;
for(int k=0; k<num_bands; k++){
a+=matriz_c[k]*l_pixel[k];
}
d_imagen_unmixing[pixel+(num_lines*num_samples*it)]=a;
}
}
}
| 1d772c59ad5540eff9d07d7e2d5dae720bfca53b.cu |
#define N 30
#define TAMANIO_MATRIZ_C 224
#define BLOCKSIZE_MEDIA 512
#define ABS(a) (((a) < 0) ? -(a) : (a))
////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
// Kernels SPCA //
////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
__global__ void NormalizeX(float* d_image, float *d_pixel, int num_bands, int num_lines, int num_samples, int iterations){
__shared__ float sdata[BLOCKSIZE_MEDIA];
__shared__ float smean[1];
__shared__ float svar[1];
int it, s;
unsigned int tid = threadIdx.x;
int element;
if(tid==0){
smean[0]=0;
svar[0]=0;
}
for (it=0; it<iterations; it++){
element=(num_lines*num_samples*blockIdx.x)+(blockDim.x*it);
if((it*blockDim.x)+tid<num_lines*num_samples){
sdata[tid]=d_image[element+tid];
}
else{
sdata[tid]=0;
}
__syncthreads();
for(s=blockDim.x/2; s>0; s=s/2){
if (tid < s){
sdata[tid]+=sdata[tid+s];
}
__syncthreads();
}
if(tid==0){
smean[0]+=sdata[0];
}
__syncthreads();
}
if(tid==0){
smean[0]/=(num_lines*num_samples);
}
__syncthreads();
for (it=0; it<iterations; it++){
element=(num_lines*num_samples*blockIdx.x)+(blockDim.x*it);
if((it*blockDim.x)+tid<num_lines*num_samples){
sdata[tid]=(d_image[element+tid]-smean[0])*(d_image[element+tid]-smean[0]);
}
else{
sdata[tid]=0;
}
__syncthreads();
for(s=blockDim.x/2; s>0; s=s/2){
if (tid < s){
sdata[tid]+=sdata[tid+s];
}
__syncthreads();
}
if(tid==0){
svar[0]+=sdata[0];
}
__syncthreads();
}
if(tid==0){
svar[0]/=((num_lines*num_samples)-1);
//d_pixel[blockIdx.x]=svar[0];
}
__syncthreads();
for (it=0; it<iterations; it++){
element=(num_lines*num_samples*blockIdx.x)+(blockDim.x*it);
if((it*blockDim.x)+tid<num_lines*num_samples){
d_image[element+tid]/=svar[0];
}
}
}
////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
// Kernels NFINDR //
////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
/*Este kernel calcula el volumen conseguido al agregar cada pixel al conjunto de endmembers.
Realiza la multiplicación de d_aux * d_HIM2x2, además calcula el valor absoluto de cada volumen.*/
/*This kernel compute the getting volume obtained by adding each pixel to the set of
endmembers. It makes the multiplication d_VVolume = d_aux * d_HIM2x2, also it gets
the absolute value of each volume. */
__global__ void VolumeCalculation(double *d_aux, double *d_HIM2x2, double *d_Vvolume,
double tmp2,int lines_samples, int n_end){
int idx = blockDim.x * blockIdx.x+threadIdx.x;
__shared__ double s_aux[N];
double a;
if (idx<lines_samples){
if(threadIdx.x<n_end){
s_aux[threadIdx.x]=d_aux[threadIdx.x];
}
__syncthreads();
a=0;
for(int i=0; i<n_end; i++){
a+=s_aux[i]*d_HIM2x2[i*lines_samples+idx];
}
a=a*tmp2;
d_Vvolume[idx]=ABS(a);
}
}
/*Este kernel obtiene los I volúmenes mayores calculados en el kernel anterior siendo I el número
de bloques con que se estructura el lanzamiento del kernel. Además obtiene los índices de los píxeles
que obtienen dichos volúmenes.*/
/*This kernel gets the I higher volumes obtained by VolumeCalculation kernel,
where I is the number of blocks that we configure the kernel launch. Also gets the index
of the pixels that get this volumes.*/
__global__ void Reduction_vol(double *d_Vvolume, double *d_volumenes, int *d_indices, int lines_samples){
__shared__ double s_v[512];
__shared__ int s_i[512];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
if((i+blockDim.x)>=lines_samples){
s_v[tid]=d_Vvolume[i];
s_i[tid]=i;
}
else{
if(d_Vvolume[i]>d_Vvolume[i + blockDim.x]){
s_v[tid]=d_Vvolume[i];
s_i[tid]=i;
}
else{
s_v[tid]=d_Vvolume[i + blockDim.x];
s_i[tid]=i+ blockDim.x;
}
}
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s>>=1){
if (tid < s){
if(s_v[tid]<=s_v[tid+s]){
s_v[tid]=s_v[tid+s];
s_i[tid]=s_i[tid+s];
}
}
__syncthreads();
}
d_volumenes[blockIdx.x]=s_v[0];
d_indices[blockIdx.x]=s_i[0];
__syncthreads();
}
////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
// Kernels Unmixing //
////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
/* This kernel performs the final unmixing step: it multiplies the compute matrix (derived from the
endmember matrix) by every pixel of the hyperspectral image, producing one abundance vector per
pixel with the fractional abundances of the endmembers. */
__global__ void Unmixing(float *d_imagen, float *d_imagen_unmixing,float *d_matriz_computo, int num_lines, int num_samples, int num_bands, int N_END)
{
int pixel = blockDim.x * blockIdx.x+threadIdx.x;
__shared__ float matriz_c[TAMANIO_MATRIZ_C];
float l_pixel[TAMANIO_MATRIZ_C];
float a;
if(pixel<num_lines*num_samples){
for(int t=0; t<num_bands; t++){
l_pixel[t]=d_imagen[pixel+(num_lines*num_samples*t)];
}
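		// for each endmember, thread 0 stages that row of the compute matrix in shared memory,
		// then every thread accumulates the dot product with its own pixel spectrum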
for(int it=0; it<N_END; it++){
if(threadIdx.x==0){
for(int i=0; i<num_bands; i++){
matriz_c[i]=d_matriz_computo[it*num_bands+i];
}
}
__syncthreads();
a=0;
for(int k=0; k<num_bands; k++){
a+=matriz_c[k]*l_pixel[k];
}
d_imagen_unmixing[pixel+(num_lines*num_samples*it)]=a;
}
}
}
|
13100313e04d7c54cc8120b57cd5571648ae550d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <time.h>
/*****************************************************************************
*
*
*
*
*
* Compile with:
* nvcc -o 2initpasscuda 2initpasscuda.cu
*
* Dr Kevan Buckley, University of Wolverhampton, 2018
****************************************************************************/
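// Compares the attempt against four hard-coded plaintexts and prints a message only when the
// whole string matches (i.e. the comparison reaches the terminating '\0').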
__device__ int is_a_match (char*attempt){
char plain_password1[] = "AN35";
char plain_password2[] = "GI66";
char plain_password3[] = "IU78";
char plain_password4[] = "RJ50";
char *a=attempt;
char *b=attempt;
char *c=attempt;
char *d=attempt;
char *p1=plain_password1;
char *p2=plain_password2;
char *p3=plain_password3;
char *p4=plain_password4;
while (*a == *p1){
if (*a == '\0')
{
printf("found password: %s\n",plain_password1);
break;
}
a++;
p1++;
}
while (*b == *p2){
if (*b == '\0')
{
printf("found password: %s\n",plain_password2);
break;
}
b++;
p2++;
}
while (*c == *p3){
if (*c == '\0')
{
printf("found password: %s\n",plain_password3);
break;
}
c++;
p3++;
}
while (*d == *p4){
if (*d == '\0')
{
printf("found password: %s\n",plain_password4);
break;
}
d++;
p4++;
}
return 0;
}
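// Each of the 26x26 threads fixes the two leading uppercase letters from its (x, y) thread
// index and brute-forces the two trailing digits '0'..'9'.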
__global__ void kernel (){
char s,a;
char password[5];
password [4] = '\0';
int i = threadIdx.x+65;
int j = threadIdx.y+65;
char firstvalue = i ;
char secondvalue = j ;
password[0] = firstvalue ;
password [1] = secondvalue;
for (s='0';s<='9'; s++){
for (a='0';a<='9'; a++){
password[2]= s;
password[3]= a;
is_a_match(password);
}
}
}
int time_difference(struct timespec *start,
struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
dim3 dim (26,26);
hipLaunchKernelGGL((kernel), dim3(1), dim3(dim), 0, 0, );
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference (&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9fs\n", time_elapsed, (time_elapsed/1.0e9));
return 0;
}
| 13100313e04d7c54cc8120b57cd5571648ae550d.cu | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
/*****************************************************************************
*
*
*
*
*
* Compile with:
* nvcc -o 2initpasscuda 2initpasscuda.cu
*
* Dr Kevan Buckley, University of Wolverhampton, 2018
****************************************************************************/
__device__ int is_a_match (char*attempt){
char plain_password1[] = "AN35";
char plain_password2[] = "GI66";
char plain_password3[] = "IU78";
char plain_password4[] = "RJ50";
char *a=attempt;
char *b=attempt;
char *c=attempt;
char *d=attempt;
char *p1=plain_password1;
char *p2=plain_password2;
char *p3=plain_password3;
char *p4=plain_password4;
while (*a == *p1){
if (*a == '\0')
{
printf("found password: %s\n",plain_password1);
break;
}
a++;
p1++;
}
while (*b == *p2){
if (*b == '\0')
{
printf("found password: %s\n",plain_password2);
break;
}
b++;
p2++;
}
while (*c == *p3){
if (*c == '\0')
{
printf("found password: %s\n",plain_password3);
break;
}
c++;
p3++;
}
while (*d == *p4){
if (*d == '\0')
{
printf("found password: %s\n",plain_password4);
break;
}
d++;
p4++;
}
return 0;
}
__global__ void kernel (){
char s,a;
char password[5];
password [4] = '\0';
int i = threadIdx.x+65;
int j = threadIdx.y+65;
char firstvalue = i ;
char secondvalue = j ;
password[0] = firstvalue ;
password [1] = secondvalue;
for (s='0';s<='9'; s++){
for (a='0';a<='9'; a++){
password[2]= s;
password[3]= a;
is_a_match(password);
}
}
}
int time_difference(struct timespec *start,
struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
dim3 dim (26,26);
kernel <<<1,dim>>>();
cudaThreadSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference (&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9fs\n", time_elapsed, (time_elapsed/1.0e9));
return 0;
}
|
23cdc812492b7b70f017d4a7c0bb28ccfecba9f9.hip | // !!! This is a file automatically generated by hipify!!!
/* This is the source file that contains the methods for the Mesh struct
*
* Author: Guodong Chen
* Email: [email protected]
* Last modified: 12/04/2019
*/
#include "DG_Mesh.cuh"
#include "CUDA_Helper.cuh"
/* initialize the mesh struct */
hipError_t initMesh(DG_Mesh *Mesh){
Mesh->halfL = 0;
Mesh->N = 0;
Mesh->h = 0;
Mesh->nNode = 0;
Mesh->coord = NULL; // V matrix
Mesh->nElem = 0;
Mesh->E2N = NULL; // E2N
Mesh->nIFace = 0;
Mesh->E2F = NULL;
Mesh->IFace = NULL;
Mesh->Jac = NULL;
Mesh->detJ = NULL;
Mesh->InvJac = NULL;
Mesh->Length = NULL;
Mesh->normal = NULL;
return hipSuccess;
}
/* create Mesh struct: allocate and initialize */
hipError_t createMesh(DG_Mesh **pMesh)
{
CUDA_CALL(hipMallocManaged(pMesh, sizeof(DG_Mesh)));
CUDA_CALL(initMesh(*pMesh));
return hipSuccess;
}
/* actually generate the mesh, fill in the mesh struct members */
hipError_t generateMesh(DG_Mesh *Mesh, double halfL, int N)
{
int i, j;
int counter;
Mesh->halfL = halfL;
Mesh->N = N;
Mesh->h = (2*halfL)/N;
Mesh->nNode = (N+1)*(N+1);
double *tempCoord;
int *tempE2N;
int *tempE2F;
// allocate the coords
CUDA_CALL(hipMallocManaged(&(Mesh->coord), Mesh->nNode*sizeof(double *)));
CUDA_CALL(hipMallocManaged(&(tempCoord), 2*Mesh->nNode*sizeof(double)));
for (i=0; i<Mesh->nNode; i++)
Mesh->coord[i] = tempCoord + 2*i;
// assign coords for every node
for (i=0; i<N+1; i++)
{
for (j=0; j<N+1; j++)
{
Mesh->coord[i*(N+1)+j][0] = -halfL + j*Mesh->h;
Mesh->coord[i*(N+1)+j][1] = -halfL + i*Mesh->h;
}
}
Mesh->nElem = 2*N*N;
// allocate E2N matrix
CUDA_CALL(hipMallocManaged(&(Mesh->E2N), Mesh->nElem*sizeof(int *)));
CUDA_CALL(hipMallocManaged(&tempE2N, 3*Mesh->nElem*sizeof(int)));
for (i=0; i<Mesh->nElem; i++)
Mesh->E2N[i] = tempE2N + 3*i;
// fill in the E2N matrix
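  // each grid square is split into two triangles: a lower-left element and an upper-right element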
counter = 0;
for (i=0; i<N; i++)
{
for (j=0; j<N; j++)
{
Mesh->E2N[counter][0] = i*(N+1)+j;
Mesh->E2N[counter][1] = i*(N+1)+j +1;
Mesh->E2N[counter][2] = i*(N+1)+j +(N+1);
counter++;
Mesh->E2N[counter][0] = i*(N+1)+j +1 +(N+1);
Mesh->E2N[counter][1] = i*(N+1)+j +1 +N;
Mesh->E2N[counter][2] = i*(N+1)+j +1;
counter++;
}
}
Mesh->nIFace = 3*N*N;
CUDA_CALL(hipMallocManaged(&(Mesh->E2F), Mesh->nElem*sizeof(int *)));
CUDA_CALL(hipMallocManaged(&tempE2F, Mesh->nElem*3*sizeof(int)));
for (i=0; i<Mesh->nElem; i++)
Mesh->E2F[i] = &(tempE2F[i*3]);
// allocate the interior faces
CUDA_CALL(hipMallocManaged(&(Mesh->IFace), Mesh->nIFace*sizeof(DG_IFace)));
counter = 0;
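  // every square contributes three interior faces: its diagonal, its left vertical edge and its
  // bottom horizontal edge (the latter two wrap periodically at the domain boundary)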
for (i=0; i<N; i++)
{
for (j=0; j<N; j++)
{
Mesh->IFace[counter].ElemL = i*(2*N)+j*2;
Mesh->IFace[counter].ElemR = i*(2*N)+j*2 +1;
Mesh->IFace[counter].EdgeL = 0;
Mesh->IFace[counter].EdgeR = 0;
Mesh->IFace[counter].node[0] = i*(N+1)+j +1;
Mesh->IFace[counter].node[1] = i*(N+1)+j +(N+1);
Mesh->E2F[Mesh->IFace[counter].ElemL][0] = counter;
Mesh->E2F[Mesh->IFace[counter].ElemR][0] = counter;
counter ++;
Mesh->IFace[counter].ElemL = i*(2*N)+j*2;
if (j==0) Mesh->IFace[counter].ElemR = i*(2*N)+j*2 +2*N-1; // Periodic boundary
else Mesh->IFace[counter].ElemR = i*(2*N)+j*2 -1;
Mesh->IFace[counter].EdgeL = 1;
Mesh->IFace[counter].EdgeR = 1;
Mesh->IFace[counter].node[0] = i*(N+1)+j +(N+1);
Mesh->IFace[counter].node[1] = i*(N+1)+j;
Mesh->E2F[Mesh->IFace[counter].ElemL][1] = counter;
Mesh->E2F[Mesh->IFace[counter].ElemR][1] = counter;
counter++;
Mesh->IFace[counter].ElemL = i*(2*N)+j*2;
if (i==0) Mesh->IFace[counter].ElemR = i*(2*N)+j*2 +(N-1)*(2*N) +1; // Periodic boundary
else Mesh->IFace[counter].ElemR = i*(2*N)+j*2 -(2*N-1);
Mesh->IFace[counter].EdgeL = 2;
Mesh->IFace[counter].EdgeR = 2;
Mesh->IFace[counter].node[0] = i*(N+1)+j;
Mesh->IFace[counter].node[1] = i*(N+1)+j +1;
Mesh->E2F[Mesh->IFace[counter].ElemL][2] = counter;
Mesh->E2F[Mesh->IFace[counter].ElemR][2] = counter;
counter ++;
}
}
return hipSuccess;
}
/* Compute mesh info, including element Jacobians, edge lengths, and edge normals */
hipError_t computeMeshInfo(DG_Mesh *Mesh)
{
int nElem = Mesh->nElem;
int nIFace = Mesh->nIFace;
DG_IFace *IFace = Mesh->IFace;
double **coord = Mesh->coord;
int **E2N = Mesh->E2N;
double *tempJac;
double *tempInvJac;
// allocate the memory for mesh info
CUDA_CALL(hipMallocManaged(&(Mesh->Jac), nElem*sizeof(double *)));
CUDA_CALL(hipMallocManaged(&tempJac, 4*nElem*sizeof(double)));
CUDA_CALL(hipMallocManaged(&(Mesh->detJ), nElem*sizeof(double)));
CUDA_CALL(hipMallocManaged(&(Mesh->InvJac), nElem*sizeof(double *)));
CUDA_CALL(hipMallocManaged(&tempInvJac, 4*nElem*sizeof(double)));
CUDA_CALL(hipMallocManaged(&(Mesh->Length), nIFace*sizeof(double)));
CUDA_CALL(hipMallocManaged(&(Mesh->normal), nIFace*2*sizeof(double)));
int i;
double *x0, *x1, *x2;
for (i=0; i<nElem; i++){
// allocate Jacobian data
Mesh->Jac[i] = tempJac + 4*i;
    // allocate Inverse Jacobian data
Mesh->InvJac[i] = tempInvJac + 4*i;
x0 = coord[E2N[i][0]];
x1 = coord[E2N[i][1]];
x2 = coord[E2N[i][2]];
Mesh->Jac[i][0] = x1[0] - x0[0];
Mesh->Jac[i][1] = x2[0] - x0[0];
Mesh->Jac[i][2] = x1[1] - x0[1];
Mesh->Jac[i][3] = x2[1] - x0[1];
Mesh->detJ[i] = Mesh->Jac[i][0]*Mesh->Jac[i][3] - Mesh->Jac[i][1]*Mesh->Jac[i][2];
Mesh->InvJac[i][0] = x2[1] - x0[1];
Mesh->InvJac[i][1] = x0[0] - x2[0];
Mesh->InvJac[i][2] = x0[1] - x1[1];
Mesh->InvJac[i][3] = x1[0] - x0[0];
}
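  // edge length and unit normal of every interior face: the normal is the edge vector rotated by -90 degrees and normalized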
double xA, yA, xB, yB;
for (i=0; i<nIFace; i++){
xA = coord[IFace[i].node[0]][0];
yA = coord[IFace[i].node[0]][1];
xB = coord[IFace[i].node[1]][0];
yB = coord[IFace[i].node[1]][1];
Mesh->Length[i] = sqrt((xA-xB)*(xA-xB) + (yA-yB)*(yA-yB));
Mesh->normal[i*2] = (yB-yA)/(Mesh->Length[i]);
Mesh->normal[i*2+1] = (xA-xB)/(Mesh->Length[i]);
}
return hipSuccess;
}
/* free the mesh memory */
hipError_t freeMesh(DG_Mesh *Mesh)
{
// free mesh coord
CUDA_CALL(hipFree(Mesh->coord[0]));
CUDA_CALL(hipFree(Mesh->coord));
// free mesh E2N
CUDA_CALL(hipFree(Mesh->E2N[0]));
CUDA_CALL(hipFree(Mesh->E2N));
// free interior faces
CUDA_CALL(hipFree(Mesh->E2F[0]));
CUDA_CALL(hipFree(Mesh->E2F));
CUDA_CALL(hipFree(Mesh->IFace));
// free Jacobian data
if (Mesh->Jac != NULL){
CUDA_CALL(hipFree(Mesh->Jac[0]));
CUDA_CALL(hipFree(Mesh->InvJac[0]));
CUDA_CALL(hipFree(Mesh->Jac));
CUDA_CALL(hipFree(Mesh->detJ));
CUDA_CALL(hipFree(Mesh->InvJac));
}
// free face length and normal data
if (Mesh->Length != NULL) {
CUDA_CALL(hipFree(Mesh->Length));
CUDA_CALL(hipFree(Mesh->normal));
}
CUDA_CALL(hipFree(Mesh));
return hipSuccess;
}
| 23cdc812492b7b70f017d4a7c0bb28ccfecba9f9.cu | /* This is the source file that contains the methods for the Mesh struct
*
* Author: Guodong Chen
* Email: [email protected]
* Last modified: 12/04/2019
*/
#include "DG_Mesh.cuh"
#include "CUDA_Helper.cuh"
/* initialize the mesh struct */
cudaError_t initMesh(DG_Mesh *Mesh){
Mesh->halfL = 0;
Mesh->N = 0;
Mesh->h = 0;
Mesh->nNode = 0;
Mesh->coord = NULL; // V matrix
Mesh->nElem = 0;
Mesh->E2N = NULL; // E2N
Mesh->nIFace = 0;
Mesh->E2F = NULL;
Mesh->IFace = NULL;
Mesh->Jac = NULL;
Mesh->detJ = NULL;
Mesh->InvJac = NULL;
Mesh->Length = NULL;
Mesh->normal = NULL;
return cudaSuccess;
}
/* create Mesh struct: allocate and initialize */
cudaError_t createMesh(DG_Mesh **pMesh)
{
CUDA_CALL(cudaMallocManaged(pMesh, sizeof(DG_Mesh)));
CUDA_CALL(initMesh(*pMesh));
return cudaSuccess;
}
/* actually generate the mesh, fill in the mesh struct members */
cudaError_t generateMesh(DG_Mesh *Mesh, double halfL, int N)
{
int i, j;
int counter;
Mesh->halfL = halfL;
Mesh->N = N;
Mesh->h = (2*halfL)/N;
Mesh->nNode = (N+1)*(N+1);
double *tempCoord;
int *tempE2N;
int *tempE2F;
// allocate the coords
CUDA_CALL(cudaMallocManaged(&(Mesh->coord), Mesh->nNode*sizeof(double *)));
CUDA_CALL(cudaMallocManaged(&(tempCoord), 2*Mesh->nNode*sizeof(double)));
for (i=0; i<Mesh->nNode; i++)
Mesh->coord[i] = tempCoord + 2*i;
// assign coords for every node
for (i=0; i<N+1; i++)
{
for (j=0; j<N+1; j++)
{
Mesh->coord[i*(N+1)+j][0] = -halfL + j*Mesh->h;
Mesh->coord[i*(N+1)+j][1] = -halfL + i*Mesh->h;
}
}
Mesh->nElem = 2*N*N;
// allocate E2N matrix
CUDA_CALL(cudaMallocManaged(&(Mesh->E2N), Mesh->nElem*sizeof(int *)));
CUDA_CALL(cudaMallocManaged(&tempE2N, 3*Mesh->nElem*sizeof(int)));
for (i=0; i<Mesh->nElem; i++)
Mesh->E2N[i] = tempE2N + 3*i;
// fill in the E2N matrix
counter = 0;
for (i=0; i<N; i++)
{
for (j=0; j<N; j++)
{
Mesh->E2N[counter][0] = i*(N+1)+j;
Mesh->E2N[counter][1] = i*(N+1)+j +1;
Mesh->E2N[counter][2] = i*(N+1)+j +(N+1);
counter++;
Mesh->E2N[counter][0] = i*(N+1)+j +1 +(N+1);
Mesh->E2N[counter][1] = i*(N+1)+j +1 +N;
Mesh->E2N[counter][2] = i*(N+1)+j +1;
counter++;
}
}
Mesh->nIFace = 3*N*N;
CUDA_CALL(cudaMallocManaged(&(Mesh->E2F), Mesh->nElem*sizeof(int *)));
CUDA_CALL(cudaMallocManaged(&tempE2F, Mesh->nElem*3*sizeof(int)));
for (i=0; i<Mesh->nElem; i++)
Mesh->E2F[i] = &(tempE2F[i*3]);
// allocate the interior faces
CUDA_CALL(cudaMallocManaged(&(Mesh->IFace), Mesh->nIFace*sizeof(DG_IFace)));
counter = 0;
for (i=0; i<N; i++)
{
for (j=0; j<N; j++)
{
Mesh->IFace[counter].ElemL = i*(2*N)+j*2;
Mesh->IFace[counter].ElemR = i*(2*N)+j*2 +1;
Mesh->IFace[counter].EdgeL = 0;
Mesh->IFace[counter].EdgeR = 0;
Mesh->IFace[counter].node[0] = i*(N+1)+j +1;
Mesh->IFace[counter].node[1] = i*(N+1)+j +(N+1);
Mesh->E2F[Mesh->IFace[counter].ElemL][0] = counter;
Mesh->E2F[Mesh->IFace[counter].ElemR][0] = counter;
counter ++;
Mesh->IFace[counter].ElemL = i*(2*N)+j*2;
if (j==0) Mesh->IFace[counter].ElemR = i*(2*N)+j*2 +2*N-1; // Periodic boundary
else Mesh->IFace[counter].ElemR = i*(2*N)+j*2 -1;
Mesh->IFace[counter].EdgeL = 1;
Mesh->IFace[counter].EdgeR = 1;
Mesh->IFace[counter].node[0] = i*(N+1)+j +(N+1);
Mesh->IFace[counter].node[1] = i*(N+1)+j;
Mesh->E2F[Mesh->IFace[counter].ElemL][1] = counter;
Mesh->E2F[Mesh->IFace[counter].ElemR][1] = counter;
counter++;
Mesh->IFace[counter].ElemL = i*(2*N)+j*2;
if (i==0) Mesh->IFace[counter].ElemR = i*(2*N)+j*2 +(N-1)*(2*N) +1; // Periodic boundary
else Mesh->IFace[counter].ElemR = i*(2*N)+j*2 -(2*N-1);
Mesh->IFace[counter].EdgeL = 2;
Mesh->IFace[counter].EdgeR = 2;
Mesh->IFace[counter].node[0] = i*(N+1)+j;
Mesh->IFace[counter].node[1] = i*(N+1)+j +1;
Mesh->E2F[Mesh->IFace[counter].ElemL][2] = counter;
Mesh->E2F[Mesh->IFace[counter].ElemR][2] = counter;
counter ++;
}
}
return cudaSuccess;
}
/* Compute mesh info, including element Jacobians, edge lengths, and edge normals */
cudaError_t computeMeshInfo(DG_Mesh *Mesh)
{
int nElem = Mesh->nElem;
int nIFace = Mesh->nIFace;
DG_IFace *IFace = Mesh->IFace;
double **coord = Mesh->coord;
int **E2N = Mesh->E2N;
double *tempJac;
double *tempInvJac;
// allocate the memory for mesh info
CUDA_CALL(cudaMallocManaged(&(Mesh->Jac), nElem*sizeof(double *)));
CUDA_CALL(cudaMallocManaged(&tempJac, 4*nElem*sizeof(double)));
CUDA_CALL(cudaMallocManaged(&(Mesh->detJ), nElem*sizeof(double)));
CUDA_CALL(cudaMallocManaged(&(Mesh->InvJac), nElem*sizeof(double *)));
CUDA_CALL(cudaMallocManaged(&tempInvJac, 4*nElem*sizeof(double)));
CUDA_CALL(cudaMallocManaged(&(Mesh->Length), nIFace*sizeof(double)));
CUDA_CALL(cudaMallocManaged(&(Mesh->normal), nIFace*2*sizeof(double)));
int i;
double *x0, *x1, *x2;
for (i=0; i<nElem; i++){
// allocate Jacobian data
Mesh->Jac[i] = tempJac + 4*i;
    // allocate Inverse Jacobian data
Mesh->InvJac[i] = tempInvJac + 4*i;
x0 = coord[E2N[i][0]];
x1 = coord[E2N[i][1]];
x2 = coord[E2N[i][2]];
Mesh->Jac[i][0] = x1[0] - x0[0];
Mesh->Jac[i][1] = x2[0] - x0[0];
Mesh->Jac[i][2] = x1[1] - x0[1];
Mesh->Jac[i][3] = x2[1] - x0[1];
Mesh->detJ[i] = Mesh->Jac[i][0]*Mesh->Jac[i][3] - Mesh->Jac[i][1]*Mesh->Jac[i][2];
Mesh->InvJac[i][0] = x2[1] - x0[1];
Mesh->InvJac[i][1] = x0[0] - x2[0];
Mesh->InvJac[i][2] = x0[1] - x1[1];
Mesh->InvJac[i][3] = x1[0] - x0[0];
}
double xA, yA, xB, yB;
for (i=0; i<nIFace; i++){
xA = coord[IFace[i].node[0]][0];
yA = coord[IFace[i].node[0]][1];
xB = coord[IFace[i].node[1]][0];
yB = coord[IFace[i].node[1]][1];
Mesh->Length[i] = sqrt((xA-xB)*(xA-xB) + (yA-yB)*(yA-yB));
Mesh->normal[i*2] = (yB-yA)/(Mesh->Length[i]);
Mesh->normal[i*2+1] = (xA-xB)/(Mesh->Length[i]);
}
return cudaSuccess;
}
/* free the mesh memory */
cudaError_t freeMesh(DG_Mesh *Mesh)
{
// free mesh coord
CUDA_CALL(cudaFree(Mesh->coord[0]));
CUDA_CALL(cudaFree(Mesh->coord));
// free mesh E2N
CUDA_CALL(cudaFree(Mesh->E2N[0]));
CUDA_CALL(cudaFree(Mesh->E2N));
// free interior faces
CUDA_CALL(cudaFree(Mesh->E2F[0]));
CUDA_CALL(cudaFree(Mesh->E2F));
CUDA_CALL(cudaFree(Mesh->IFace));
// free Jacobian data
if (Mesh->Jac != NULL){
CUDA_CALL(cudaFree(Mesh->Jac[0]));
CUDA_CALL(cudaFree(Mesh->InvJac[0]));
CUDA_CALL(cudaFree(Mesh->Jac));
CUDA_CALL(cudaFree(Mesh->detJ));
CUDA_CALL(cudaFree(Mesh->InvJac));
}
// free face length and normal data
if (Mesh->Length != NULL) {
CUDA_CALL(cudaFree(Mesh->Length));
CUDA_CALL(cudaFree(Mesh->normal));
}
CUDA_CALL(cudaFree(Mesh));
return cudaSuccess;
}
|
31fa58cb2d382d893a9338b38ca9ec856051060b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
if (index_x < numCols && index_y < numRows) {
int index = numCols * index_y + index_x;
uchar4 rgb_value = rgbaImage[index];
greyImage[index] = .299f*rgb_value.x + .587f*rgb_value.y + .114f*rgb_value.z;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
const int blockWidth = 32;
const dim3 blockSize(blockWidth, blockWidth, 1);
const dim3 gridSize( numCols/blockWidth + 1, numRows/blockWidth + 1, 1);
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
}
| 31fa58cb2d382d893a9338b38ca9ec856051060b.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
if (index_x < numCols && index_y < numRows) {
int index = numCols * index_y + index_x;
uchar4 rgb_value = rgbaImage[index];
greyImage[index] = .299f*rgb_value.x + .587f*rgb_value.y + .114f*rgb_value.z;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
const int blockWidth = 32;
const dim3 blockSize(blockWidth, blockWidth, 1);
const dim3 gridSize( numCols/blockWidth + 1, numRows/blockWidth + 1, 1);
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
}
|
d12e5bfd4a0543f8e65d62205bdd7946a1c72872.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SLIC_cuda.h"
#if __CUDA_ARCH__<300
texture<uchar4, hipTextureType2D, hipReadModeElementType> frameBGRA_tex;
surface<void, cudaSurfaceType2D> frameLab_surf;
surface<void, cudaSurfaceType2D> labels_surf;
#endif
//======== device local function ============
__device__ float2 operator-(const float2 & a, const float2 & b) { return make_float2(a.x - b.x, a.y - b.y); }
__device__ float3 operator-(const float3 & a, const float3 & b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); }
__device__ int2 operator+(const int2 & a, const int2 & b) { return make_int2(a.x + b.x, a.y + b.y); }
__device__ float computeDistance(float2 c_p_xy, float3 c_p_Lab, float areaSpx, float wc2){
float ds2 = pow(c_p_xy.x, 2) + pow(c_p_xy.y, 2);
float dc2 = pow(c_p_Lab.x, 2) + pow(c_p_Lab.y, 2) + pow(c_p_Lab.z, 2);
float dist = sqrt(dc2 + ds2 / areaSpx*wc2);
return dist;
}
__device__ int convertIdx(int2 wg, int lc_idx, int nBloc_per_row){
int2 relPos2D = make_int2(lc_idx % 5 - 2, lc_idx / 5 - 2);
int2 glPos2D = wg + relPos2D;
return glPos2D.y*nBloc_per_row + glPos2D.x;
}
//============ Kernel ===============
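// kRgb2CIELab: converts each BGRA pixel to CIELab and writes it to the Lab surface.
// Two variants are compiled: texture/surface references for CC < 3.0, texture/surface objects otherwise.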
#if __CUDA_ARCH__<300
__global__ void kRgb2CIELab(int width, int height)
{
int px = blockIdx.x*blockDim.x + threadIdx.x;
int py = blockIdx.y*blockDim.y + threadIdx.y;
if (px < width && py < height) {
uchar4 nPixel = tex2D(frameBGRA_tex, px, py);//inputImg[offset];
float _b = (float)nPixel.x / 255.0;
float _g = (float)nPixel.y / 255.0;
float _r = (float)nPixel.z / 255.0;
float x = _r * 0.412453 + _g * 0.357580 + _b * 0.180423;
float y = _r * 0.212671 + _g * 0.715160 + _b * 0.072169;
float z = _r * 0.019334 + _g * 0.119193 + _b * 0.950227;
x /= 0.950456;
float y3 = exp(log(y) / 3.0);
z /= 1.088754;
float l, a, b;
x = x > 0.008856 ? exp(log(x) / 3.0) : (7.787 * x + 0.13793);
y = y > 0.008856 ? y3 : 7.787 * y + 0.13793;
z = z > 0.008856 ? z /= exp(log(z) / 3.0) : (7.787 * z + 0.13793);
l = y > 0.008856 ? (116.0 * y3 - 16.0) : 903.3 * y;
a = (x - y) * 500.0;
b = (y - z) * 200.0;
float4 fPixel;
fPixel.x = l;
fPixel.y = a;
fPixel.z = b;
fPixel.w = 0;
surf2Dwrite(fPixel, frameLab_surf, px * 16, py);
}
}
#else
__global__ void kRgb2CIELab(hipTextureObject_t inputImg, hipSurfaceObject_t outputImg, int width, int height)
{
int offsetBlock = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y * width;
int offset = offsetBlock + threadIdx.x + threadIdx.y * width;
int px = blockIdx.x*blockDim.x + threadIdx.x;
int py = blockIdx.y*blockDim.y + threadIdx.y;
if (px<width && py<height) {
uchar4 nPixel = tex2D<uchar4>(inputImg, px, py);//inputImg[offset];
float _b = (float)nPixel.x / 255.0;
float _g = (float)nPixel.y / 255.0;
float _r = (float)nPixel.z / 255.0;
float x = _r * 0.412453 + _g * 0.357580 + _b * 0.180423;
float y = _r * 0.212671 + _g * 0.715160 + _b * 0.072169;
float z = _r * 0.019334 + _g * 0.119193 + _b * 0.950227;
x /= 0.950456;
float y3 = exp(log(y) / 3.0);
z /= 1.088754;
float l, a, b;
x = x > 0.008856 ? exp(log(x) / 3.0) : (7.787 * x + 0.13793);
y = y > 0.008856 ? y3 : 7.787 * y + 0.13793;
z = z > 0.008856 ? z /= exp(log(z) / 3.0) : (7.787 * z + 0.13793);
l = y > 0.008856 ? (116.0 * y3 - 16.0) : 903.3 * y;
a = (x - y) * 500.0;
b = (y - z) * 200.0;
float4 fPixel;
fPixel.x = l;
fPixel.y = a;
fPixel.z = b;
fPixel.w = 0;
surf2Dwrite(fPixel, outputImg, px * 16, py);
}
}
#endif
#if __CUDA_ARCH__<300
__global__ void k_initClusters(float* clusters, int width, int height, int nSpxPerRow, int nSpxPerCol){
int idx_c = blockIdx.x*blockDim.x + threadIdx.x, idx_c5 = idx_c * 5;
int nSpx = nSpxPerCol*nSpxPerRow;
if (idx_c<nSpx){
int wSpx = width / nSpxPerRow, hSpx = height / nSpxPerCol;
int i = idx_c / nSpxPerRow;
int j = idx_c%nSpxPerRow;
int x = j*wSpx + wSpx / 2;
int y = i*hSpx + hSpx / 2;
float4 color;
surf2Dread(&color, frameLab_surf, x * 16, y);
clusters[idx_c5] = color.x;
clusters[idx_c5 + 1] = color.y;
clusters[idx_c5 + 2] = color.z;
clusters[idx_c5 + 3] = x;
clusters[idx_c5 + 4] = y;
}
}
#else
__global__ void k_initClusters(hipSurfaceObject_t frameLab, float* clusters, int width, int height, int nSpxPerRow, int nSpxPerCol){
int idx_c = blockIdx.x*blockDim.x + threadIdx.x, idx_c5 = idx_c * 5;
int nSpx = nSpxPerCol*nSpxPerRow;
if (idx_c<nSpx){
int wSpx = width / nSpxPerRow, hSpx = height / nSpxPerCol;
int i = idx_c / nSpxPerRow;
int j = idx_c%nSpxPerRow;
int x = j*wSpx + wSpx / 2;
int y = i*hSpx + hSpx / 2;
float4 color;
surf2Dread(&color, frameLab, x * 16, y);
clusters[idx_c5] = color.x;
clusters[idx_c5 + 1] = color.y;
clusters[idx_c5 + 2] = color.z;
clusters[idx_c5 + 3] = x;
clusters[idx_c5 + 4] = y;
}
}
#endif
#if __CUDA_ARCH__<300
__global__ void k_assignement(int width, int height, int wSpx, int hSpx, float* clusters, float* accAtt_g, float wc2){
// gather NNEIGH surrounding clusters
__shared__ float4 sharedLab[NNEIGH][NNEIGH];
__shared__ float2 sharedXY[NNEIGH][NNEIGH];
int nClustPerRow = width / wSpx;
int nn2 = NNEIGH / 2;
if (threadIdx.x<NNEIGH && threadIdx.y<NNEIGH)
{
int id_x = threadIdx.x - nn2;
int id_y = threadIdx.y - nn2;
int clustLinIdx = blockIdx.x + id_y*nClustPerRow + id_x;
if (clustLinIdx >= 0 && clustLinIdx<gridDim.x)
{
int clustLinIdx5 = clustLinIdx * 5;
sharedLab[threadIdx.y][threadIdx.x].x = clusters[clustLinIdx5];
sharedLab[threadIdx.y][threadIdx.x].y = clusters[clustLinIdx5 + 1];
sharedLab[threadIdx.y][threadIdx.x].z = clusters[clustLinIdx5 + 2];
sharedXY[threadIdx.y][threadIdx.x].x = clusters[clustLinIdx5 + 3];
sharedXY[threadIdx.y][threadIdx.x].y = clusters[clustLinIdx5 + 4];
}
else
{
sharedLab[threadIdx.y][threadIdx.x].x = -1;
}
}
__syncthreads();
// Find nearest neighbour
float areaSpx = wSpx*hSpx;
float distanceMin = MAX_DIST;
float labelMin = -1;
int px_in_grid = blockIdx.x*blockDim.x + threadIdx.x;
int py_in_grid = blockIdx.y*blockDim.y + threadIdx.y;
int px = px_in_grid%width;
if (py_in_grid<hSpx && px<width)
{
int py = py_in_grid + px_in_grid / width*hSpx;
//int pxpy = py*width+px;
float4 color;
surf2Dread(&color, frameLab_surf, px * 16, py);
//float3 px_Lab = make_float3(frameLab[pxpy].x,frameLab[pxpy].y,frameLab[pxpy].z);
float3 px_Lab = make_float3(color.x, color.y, color.z);
float2 px_xy = make_float2(px, py);
for (int i = 0; i<NNEIGH; i++)
{
for (int j = 0; j<NNEIGH; j++)
{
if (sharedLab[i][j].x != -1)
{
float2 cluster_xy = make_float2(sharedXY[i][j].x, sharedXY[i][j].y);
float3 cluster_Lab = make_float3(sharedLab[i][j].x, sharedLab[i][j].y, sharedLab[i][j].z);
float2 px_c_xy = px_xy - cluster_xy;
float3 px_c_Lab = px_Lab - cluster_Lab;
float distTmp = fminf(computeDistance(px_c_xy, px_c_Lab, areaSpx, wc2), distanceMin);
if (distTmp != distanceMin){
distanceMin = distTmp;
labelMin = blockIdx.x + (i - nn2)*nClustPerRow + (j - nn2);
}
}
}
}
surf2Dwrite(labelMin, labels_surf, px * 4, py);
int labelMin6 = int(labelMin * 6);
atomicAdd(&accAtt_g[labelMin6], px_Lab.x);
atomicAdd(&accAtt_g[labelMin6 + 1], px_Lab.y);
atomicAdd(&accAtt_g[labelMin6 + 2], px_Lab.z);
atomicAdd(&accAtt_g[labelMin6 + 3], px);
atomicAdd(&accAtt_g[labelMin6 + 4], py);
atomicAdd(&accAtt_g[labelMin6 + 5], 1); //counter
}
}
#else
__global__ void k_assignement(int width, int height, int wSpx, int hSpx, hipSurfaceObject_t frameLab, hipSurfaceObject_t labels, float* clusters, float* accAtt_g, float wc2){
// gather NNEIGH surrounding clusters
__shared__ float4 sharedLab[NNEIGH][NNEIGH];
__shared__ float2 sharedXY[NNEIGH][NNEIGH];
int nClustPerRow = width / wSpx;
int nn2 = NNEIGH / 2;
if (threadIdx.x<NNEIGH && threadIdx.y<NNEIGH)
{
int id_x = threadIdx.x - nn2;
int id_y = threadIdx.y - nn2;
int clustLinIdx = blockIdx.x + id_y*nClustPerRow + id_x;
if (clustLinIdx >= 0 && clustLinIdx<gridDim.x)
{
int clustLinIdx5 = clustLinIdx * 5;
sharedLab[threadIdx.y][threadIdx.x].x = clusters[clustLinIdx5];
sharedLab[threadIdx.y][threadIdx.x].y = clusters[clustLinIdx5 + 1];
sharedLab[threadIdx.y][threadIdx.x].z = clusters[clustLinIdx5 + 2];
sharedXY[threadIdx.y][threadIdx.x].x = clusters[clustLinIdx5 + 3];
sharedXY[threadIdx.y][threadIdx.x].y = clusters[clustLinIdx5 + 4];
}
else
{
sharedLab[threadIdx.y][threadIdx.x].x = -1;
}
}
__syncthreads();
// Find nearest neighbour
float areaSpx = wSpx*hSpx;
float distanceMin = MAX_DIST;
float labelMin = -1;
int px_in_grid = blockIdx.x*blockDim.x + threadIdx.x;
int py_in_grid = blockIdx.y*blockDim.y + threadIdx.y;
int px = px_in_grid%width;
if (py_in_grid<hSpx && px<width)
{
int py = py_in_grid + px_in_grid / width*hSpx;
int pxpy = py*width + px;
float4 color;
surf2Dread(&color, frameLab, px * 16, py);
//float3 px_Lab = make_float3(frameLab[pxpy].x,frameLab[pxpy].y,frameLab[pxpy].z);
float3 px_Lab = make_float3(color.x, color.y, color.z);
float2 px_xy = make_float2(px, py);
for (int i = 0; i<NNEIGH; i++)
{
for (int j = 0; j<NNEIGH; j++)
{
if (sharedLab[i][j].x != -1)
{
float2 cluster_xy = make_float2(sharedXY[i][j].x, sharedXY[i][j].y);
float3 cluster_Lab = make_float3(sharedLab[i][j].x, sharedLab[i][j].y, sharedLab[i][j].z);
float2 px_c_xy = px_xy - cluster_xy;
float3 px_c_Lab = px_Lab - cluster_Lab;
float distTmp = fminf(computeDistance(px_c_xy, px_c_Lab, areaSpx, wc2), distanceMin);
if (distTmp != distanceMin){
distanceMin = distTmp;
labelMin = blockIdx.x + (i - nn2)*nClustPerRow + (j - nn2);
}
}
}
}
surf2Dwrite(labelMin, labels, px * 4, py);
int labelMin6 = int(labelMin * 6);
atomicAdd(&accAtt_g[labelMin6], px_Lab.x);
atomicAdd(&accAtt_g[labelMin6 + 1], px_Lab.y);
atomicAdd(&accAtt_g[labelMin6 + 2], px_Lab.z);
atomicAdd(&accAtt_g[labelMin6 + 3], px);
atomicAdd(&accAtt_g[labelMin6 + 4], py);
atomicAdd(&accAtt_g[labelMin6 + 5], 1); //counter
}
}
#endif
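// k_update: finalizes each cluster centroid by dividing the accumulated Lab/xy sums by the
// pixel count, then clears the accumulator for the next iteration.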
__global__ void k_update(int nSpx, float* clusters, float* accAtt_g)
{
int cluster_idx = blockIdx.x*blockDim.x + threadIdx.x;
if (cluster_idx<nSpx)
{
uint cluster_idx6 = cluster_idx * 6;
uint cluster_idx5 = cluster_idx * 5;
int counter = accAtt_g[cluster_idx6 + 5];
if (counter != 0){
clusters[cluster_idx5] = accAtt_g[cluster_idx6] / counter;
clusters[cluster_idx5 + 1] = accAtt_g[cluster_idx6 + 1] / counter;
clusters[cluster_idx5 + 2] = accAtt_g[cluster_idx6 + 2] / counter;
clusters[cluster_idx5 + 3] = accAtt_g[cluster_idx6 + 3] / counter;
clusters[cluster_idx5 + 4] = accAtt_g[cluster_idx6 + 4] / counter;
//reset accumulator
accAtt_g[cluster_idx6] = 0;
accAtt_g[cluster_idx6 + 1] = 0;
accAtt_g[cluster_idx6 + 2] = 0;
accAtt_g[cluster_idx6 + 3] = 0;
accAtt_g[cluster_idx6 + 4] = 0;
accAtt_g[cluster_idx6 + 5] = 0;
}
}
}
//============== wrapper =================
__host__ void SLIC_cuda::InitBuffers() {
//allocate buffers on gpu
//gpuErrchk(hipMalloc((void**)&frameBGRA_g, m_nPx*sizeof(uchar4))); //4 channels for padding
hipChannelFormatDesc channelDescr = hipCreateChannelDesc(8, 8, 8, 8, hipChannelFormatKindUnsigned);
gpuErrchk(hipMallocArray(&frameBGRA_array, &channelDescr, m_width, m_height));
hipChannelFormatDesc channelDescrLab = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindFloat);
gpuErrchk(hipMallocArray(&frameLab_array, &channelDescrLab, m_width, m_height, hipArraySurfaceLoadStore));
hipChannelFormatDesc channelDescrLabels = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
gpuErrchk(hipMallocArray(&labels_array, &channelDescrLabels, m_width, m_height, hipArraySurfaceLoadStore));
//texture FrameBGR (read-only)
#if __CUDA_ARCH__>=300
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = frameBGRA_array;
// Specify texture object parameters
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeClamp;
texDesc.addressMode[1] = hipAddressModeClamp;
texDesc.filterMode = hipFilterModePoint;
texDesc.readMode = hipReadModeElementType;
texDesc.normalizedCoords = false;
gpuErrchk(hipCreateTextureObject(&frameBGRA_tex, &resDesc, &texDesc, NULL));
#else
frameBGRA_tex.addressMode[0] = hipAddressModeClamp;
frameBGRA_tex.addressMode[1] = hipAddressModeClamp;
frameBGRA_tex.filterMode = hipFilterModePoint;
frameBGRA_tex.normalized = false;
hipBindTextureToArray(&frameBGRA_tex, frameBGRA_array, &channelDescr);
#endif
// surface frameLab
#if __CUDA_ARCH__>=300
hipResourceDesc resDescLab;
memset(&resDescLab, 0, sizeof(resDescLab));
resDescLab.resType = hipResourceTypeArray;
resDescLab.res.array.array = frameLab_array;
gpuErrchk(hipCreateSurfaceObject(&frameLab_surf, &resDescLab));
#else
hipBindSurfaceToArray(&frameLab_surf, frameLab_array, &channelDescrLab);
#endif
// surface labels
#if __CUDA_ARCH__>=300
hipResourceDesc resDescLabels;
memset(&resDescLabels, 0, sizeof(resDescLabels));
resDescLabels.resType = hipResourceTypeArray;
resDescLabels.res.array.array = labels_array;
gpuErrchk(hipCreateSurfaceObject(&labels_surf, &resDescLabels));
#else
hipBindSurfaceToArray(&labels_surf, labels_array, &channelDescrLabels);
#endif
// buffers clusters , accAtt
gpuErrchk(hipMalloc((void**)&clusters_g, m_nSpx*sizeof(float)* 5)); // 5-D centroid
gpuErrchk(hipMalloc((void**)&accAtt_g, m_nSpx*sizeof(float)* 6)); // 5-D centroid acc + 1 counter
hipMemset(accAtt_g, 0, m_nSpx*sizeof(float)* 6);//initialize accAtt to 0
}
#if __CUDA_ARCH__>=300
__host__ void SLIC_cuda::Rgb2CIELab(hipTextureObject_t inputImg, hipSurfaceObject_t outputImg, int width, int height)
{
int side = 16;
dim3 threadsPerBlock(side, side);
dim3 numBlocks(iDivUp(m_width, side), iDivUp(m_height, side));
kRgb2CIELab << <numBlocks, threadsPerBlock >> >(inputImg, outputImg, width, height);
}
#else
__host__ void SLIC_cuda::Rgb2CIELab(int width, int height)
{
int side = 16;
dim3 threadsPerBlock(side, side);
dim3 numBlocks(iDivUp(m_width, side), iDivUp(m_height, side));
kRgb2CIELab << <numBlocks, threadsPerBlock >> >(width, height);
}
#endif
__host__ void SLIC_cuda::InitClusters()
{
dim3 threadsPerBlock(NMAX_THREAD);
dim3 numBlocks(iDivUp(m_nSpx, NMAX_THREAD));
#if __CUDA_ARCH__>=300
k_initClusters << <numBlocks, threadsPerBlock >> >(frameLab_surf, clusters_g, m_width, m_height, m_width / m_wSpx, m_height / m_hSpx);
#else
k_initClusters << <numBlocks, threadsPerBlock >> >(clusters_g, m_width, m_height, m_width / m_wSpx, m_height / m_hSpx);
#endif
}
__host__ void SLIC_cuda::Assignement() {
int hMax = NMAX_THREAD / m_hSpx;
int nBlockPerClust = iDivUp(m_hSpx, hMax);
dim3 blockPerGrid(m_nSpx, nBlockPerClust);
dim3 threadPerBlock(m_wSpx, ::min(m_hSpx, hMax));
CV_Assert(threadPerBlock.x >= 3 && threadPerBlock.y >= 3);
float wc2 = m_wc * m_wc;
#if __CUDA_ARCH__>=300
k_assignement << < blockPerGrid, threadPerBlock >> >(m_width, m_height, m_wSpx, m_hSpx, frameLab_surf, labels_surf, clusters_g, accAtt_g, wc2);
#else
k_assignement << < blockPerGrid, threadPerBlock >> >(m_width, m_height, m_wSpx, m_hSpx, clusters_g, accAtt_g, wc2);
#endif
}
__host__ void SLIC_cuda::Update()
{
dim3 threadsPerBlock(NMAX_THREAD);
dim3 numBlocks(iDivUp(m_nSpx, NMAX_THREAD));
k_update << <numBlocks, threadsPerBlock >> >(m_nSpx, clusters_g, accAtt_g);
}
| d12e5bfd4a0543f8e65d62205bdd7946a1c72872.cu | #include "SLIC_cuda.h"
#if __CUDA_ARCH__<300
texture<uchar4, cudaTextureType2D, cudaReadModeElementType> frameBGRA_tex;
surface<void, cudaSurfaceType2D> frameLab_surf;
surface<void, cudaSurfaceType2D> labels_surf;
#endif
//======== device local function ============
__device__ float2 operator-(const float2 & a, const float2 & b) { return make_float2(a.x - b.x, a.y - b.y); }
__device__ float3 operator-(const float3 & a, const float3 & b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); }
__device__ int2 operator+(const int2 & a, const int2 & b) { return make_int2(a.x + b.x, a.y + b.y); }
__device__ float computeDistance(float2 c_p_xy, float3 c_p_Lab, float areaSpx, float wc2){
float ds2 = pow(c_p_xy.x, 2) + pow(c_p_xy.y, 2);
float dc2 = pow(c_p_Lab.x, 2) + pow(c_p_Lab.y, 2) + pow(c_p_Lab.z, 2);
float dist = sqrt(dc2 + ds2 / areaSpx*wc2);
return dist;
}
__device__ int convertIdx(int2 wg, int lc_idx, int nBloc_per_row){
int2 relPos2D = make_int2(lc_idx % 5 - 2, lc_idx / 5 - 2);
int2 glPos2D = wg + relPos2D;
return glPos2D.y*nBloc_per_row + glPos2D.x;
}
//============ Kernel ===============
#if __CUDA_ARCH__<300
__global__ void kRgb2CIELab(int width, int height)
{
int px = blockIdx.x*blockDim.x + threadIdx.x;
int py = blockIdx.y*blockDim.y + threadIdx.y;
if (px < width && py < height) {
uchar4 nPixel = tex2D(frameBGRA_tex, px, py);//inputImg[offset];
float _b = (float)nPixel.x / 255.0;
float _g = (float)nPixel.y / 255.0;
float _r = (float)nPixel.z / 255.0;
float x = _r * 0.412453 + _g * 0.357580 + _b * 0.180423;
float y = _r * 0.212671 + _g * 0.715160 + _b * 0.072169;
float z = _r * 0.019334 + _g * 0.119193 + _b * 0.950227;
x /= 0.950456;
float y3 = exp(log(y) / 3.0);
z /= 1.088754;
float l, a, b;
x = x > 0.008856 ? exp(log(x) / 3.0) : (7.787 * x + 0.13793);
y = y > 0.008856 ? y3 : 7.787 * y + 0.13793;
z = z > 0.008856 ? z /= exp(log(z) / 3.0) : (7.787 * z + 0.13793);
l = y > 0.008856 ? (116.0 * y3 - 16.0) : 903.3 * y;
a = (x - y) * 500.0;
b = (y - z) * 200.0;
float4 fPixel;
fPixel.x = l;
fPixel.y = a;
fPixel.z = b;
fPixel.w = 0;
surf2Dwrite(fPixel, frameLab_surf, px * 16, py);
}
}
#else
__global__ void kRgb2CIELab(cudaTextureObject_t inputImg, cudaSurfaceObject_t outputImg, int width, int height)
{
int offsetBlock = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y * width;
int offset = offsetBlock + threadIdx.x + threadIdx.y * width;
int px = blockIdx.x*blockDim.x + threadIdx.x;
int py = blockIdx.y*blockDim.y + threadIdx.y;
if (px<width && py<height) {
uchar4 nPixel = tex2D<uchar4>(inputImg, px, py);//inputImg[offset];
float _b = (float)nPixel.x / 255.0;
float _g = (float)nPixel.y / 255.0;
float _r = (float)nPixel.z / 255.0;
float x = _r * 0.412453 + _g * 0.357580 + _b * 0.180423;
float y = _r * 0.212671 + _g * 0.715160 + _b * 0.072169;
float z = _r * 0.019334 + _g * 0.119193 + _b * 0.950227;
x /= 0.950456;
float y3 = exp(log(y) / 3.0);
z /= 1.088754;
float l, a, b;
x = x > 0.008856 ? exp(log(x) / 3.0) : (7.787 * x + 0.13793);
y = y > 0.008856 ? y3 : 7.787 * y + 0.13793;
z = z > 0.008856 ? z /= exp(log(z) / 3.0) : (7.787 * z + 0.13793);
l = y > 0.008856 ? (116.0 * y3 - 16.0) : 903.3 * y;
a = (x - y) * 500.0;
b = (y - z) * 200.0;
float4 fPixel;
fPixel.x = l;
fPixel.y = a;
fPixel.z = b;
fPixel.w = 0;
surf2Dwrite(fPixel, outputImg, px * 16, py);
}
}
#endif
#if __CUDA_ARCH__<300
__global__ void k_initClusters(float* clusters, int width, int height, int nSpxPerRow, int nSpxPerCol){
int idx_c = blockIdx.x*blockDim.x + threadIdx.x, idx_c5 = idx_c * 5;
int nSpx = nSpxPerCol*nSpxPerRow;
if (idx_c<nSpx){
int wSpx = width / nSpxPerRow, hSpx = height / nSpxPerCol;
int i = idx_c / nSpxPerRow;
int j = idx_c%nSpxPerRow;
int x = j*wSpx + wSpx / 2;
int y = i*hSpx + hSpx / 2;
float4 color;
surf2Dread(&color, frameLab_surf, x * 16, y);
clusters[idx_c5] = color.x;
clusters[idx_c5 + 1] = color.y;
clusters[idx_c5 + 2] = color.z;
clusters[idx_c5 + 3] = x;
clusters[idx_c5 + 4] = y;
}
}
#else
__global__ void k_initClusters(cudaSurfaceObject_t frameLab, float* clusters, int width, int height, int nSpxPerRow, int nSpxPerCol){
int idx_c = blockIdx.x*blockDim.x + threadIdx.x, idx_c5 = idx_c * 5;
int nSpx = nSpxPerCol*nSpxPerRow;
if (idx_c<nSpx){
int wSpx = width / nSpxPerRow, hSpx = height / nSpxPerCol;
int i = idx_c / nSpxPerRow;
int j = idx_c%nSpxPerRow;
int x = j*wSpx + wSpx / 2;
int y = i*hSpx + hSpx / 2;
float4 color;
surf2Dread(&color, frameLab, x * 16, y);
clusters[idx_c5] = color.x;
clusters[idx_c5 + 1] = color.y;
clusters[idx_c5 + 2] = color.z;
clusters[idx_c5 + 3] = x;
clusters[idx_c5 + 4] = y;
}
}
#endif
#if __CUDA_ARCH__<300
__global__ void k_assignement(int width, int height, int wSpx, int hSpx, float* clusters, float* accAtt_g, float wc2){
// gather NNEIGH surrounding clusters
__shared__ float4 sharedLab[NNEIGH][NNEIGH];
__shared__ float2 sharedXY[NNEIGH][NNEIGH];
int nClustPerRow = width / wSpx;
int nn2 = NNEIGH / 2;
if (threadIdx.x<NNEIGH && threadIdx.y<NNEIGH)
{
int id_x = threadIdx.x - nn2;
int id_y = threadIdx.y - nn2;
int clustLinIdx = blockIdx.x + id_y*nClustPerRow + id_x;
if (clustLinIdx >= 0 && clustLinIdx<gridDim.x)
{
int clustLinIdx5 = clustLinIdx * 5;
sharedLab[threadIdx.y][threadIdx.x].x = clusters[clustLinIdx5];
sharedLab[threadIdx.y][threadIdx.x].y = clusters[clustLinIdx5 + 1];
sharedLab[threadIdx.y][threadIdx.x].z = clusters[clustLinIdx5 + 2];
sharedXY[threadIdx.y][threadIdx.x].x = clusters[clustLinIdx5 + 3];
sharedXY[threadIdx.y][threadIdx.x].y = clusters[clustLinIdx5 + 4];
}
else
{
sharedLab[threadIdx.y][threadIdx.x].x = -1;
}
}
__syncthreads();
// Find nearest neighbour
float areaSpx = wSpx*hSpx;
float distanceMin = MAX_DIST;
float labelMin = -1;
int px_in_grid = blockIdx.x*blockDim.x + threadIdx.x;
int py_in_grid = blockIdx.y*blockDim.y + threadIdx.y;
int px = px_in_grid%width;
if (py_in_grid<hSpx && px<width)
{
int py = py_in_grid + px_in_grid / width*hSpx;
//int pxpy = py*width+px;
float4 color;
surf2Dread(&color, frameLab_surf, px * 16, py);
//float3 px_Lab = make_float3(frameLab[pxpy].x,frameLab[pxpy].y,frameLab[pxpy].z);
float3 px_Lab = make_float3(color.x, color.y, color.z);
float2 px_xy = make_float2(px, py);
for (int i = 0; i<NNEIGH; i++)
{
for (int j = 0; j<NNEIGH; j++)
{
if (sharedLab[i][j].x != -1)
{
float2 cluster_xy = make_float2(sharedXY[i][j].x, sharedXY[i][j].y);
float3 cluster_Lab = make_float3(sharedLab[i][j].x, sharedLab[i][j].y, sharedLab[i][j].z);
float2 px_c_xy = px_xy - cluster_xy;
float3 px_c_Lab = px_Lab - cluster_Lab;
float distTmp = fminf(computeDistance(px_c_xy, px_c_Lab, areaSpx, wc2), distanceMin);
if (distTmp != distanceMin){
distanceMin = distTmp;
labelMin = blockIdx.x + (i - nn2)*nClustPerRow + (j - nn2);
}
}
}
}
surf2Dwrite(labelMin, labels_surf, px * 4, py);
int labelMin6 = int(labelMin * 6);
atomicAdd(&accAtt_g[labelMin6], px_Lab.x);
atomicAdd(&accAtt_g[labelMin6 + 1], px_Lab.y);
atomicAdd(&accAtt_g[labelMin6 + 2], px_Lab.z);
atomicAdd(&accAtt_g[labelMin6 + 3], px);
atomicAdd(&accAtt_g[labelMin6 + 4], py);
atomicAdd(&accAtt_g[labelMin6 + 5], 1); //counter
}
}
#else
__global__ void k_assignement(int width, int height, int wSpx, int hSpx, cudaSurfaceObject_t frameLab, cudaSurfaceObject_t labels, float* clusters, float* accAtt_g, float wc2){
// gather NNEIGH surrounding clusters
__shared__ float4 sharedLab[NNEIGH][NNEIGH];
__shared__ float2 sharedXY[NNEIGH][NNEIGH];
int nClustPerRow = width / wSpx;
int nn2 = NNEIGH / 2;
if (threadIdx.x<NNEIGH && threadIdx.y<NNEIGH)
{
int id_x = threadIdx.x - nn2;
int id_y = threadIdx.y - nn2;
int clustLinIdx = blockIdx.x + id_y*nClustPerRow + id_x;
if (clustLinIdx >= 0 && clustLinIdx<gridDim.x)
{
int clustLinIdx5 = clustLinIdx * 5;
sharedLab[threadIdx.y][threadIdx.x].x = clusters[clustLinIdx5];
sharedLab[threadIdx.y][threadIdx.x].y = clusters[clustLinIdx5 + 1];
sharedLab[threadIdx.y][threadIdx.x].z = clusters[clustLinIdx5 + 2];
sharedXY[threadIdx.y][threadIdx.x].x = clusters[clustLinIdx5 + 3];
sharedXY[threadIdx.y][threadIdx.x].y = clusters[clustLinIdx5 + 4];
}
else
{
sharedLab[threadIdx.y][threadIdx.x].x = -1;
}
}
__syncthreads();
// Find nearest neighbour
float areaSpx = wSpx*hSpx;
float distanceMin = MAX_DIST;
float labelMin = -1;
int px_in_grid = blockIdx.x*blockDim.x + threadIdx.x;
int py_in_grid = blockIdx.y*blockDim.y + threadIdx.y;
int px = px_in_grid%width;
if (py_in_grid<hSpx && px<width)
{
int py = py_in_grid + px_in_grid / width*hSpx;
int pxpy = py*width + px;
float4 color;
surf2Dread(&color, frameLab, px * 16, py);
//float3 px_Lab = make_float3(frameLab[pxpy].x,frameLab[pxpy].y,frameLab[pxpy].z);
float3 px_Lab = make_float3(color.x, color.y, color.z);
float2 px_xy = make_float2(px, py);
for (int i = 0; i<NNEIGH; i++)
{
for (int j = 0; j<NNEIGH; j++)
{
if (sharedLab[i][j].x != -1)
{
float2 cluster_xy = make_float2(sharedXY[i][j].x, sharedXY[i][j].y);
float3 cluster_Lab = make_float3(sharedLab[i][j].x, sharedLab[i][j].y, sharedLab[i][j].z);
float2 px_c_xy = px_xy - cluster_xy;
float3 px_c_Lab = px_Lab - cluster_Lab;
float distTmp = fminf(computeDistance(px_c_xy, px_c_Lab, areaSpx, wc2), distanceMin);
if (distTmp != distanceMin){
distanceMin = distTmp;
labelMin = blockIdx.x + (i - nn2)*nClustPerRow + (j - nn2);
}
}
}
}
surf2Dwrite(labelMin, labels, px * 4, py);
int labelMin6 = int(labelMin * 6);
atomicAdd(&accAtt_g[labelMin6], px_Lab.x);
atomicAdd(&accAtt_g[labelMin6 + 1], px_Lab.y);
atomicAdd(&accAtt_g[labelMin6 + 2], px_Lab.z);
atomicAdd(&accAtt_g[labelMin6 + 3], px);
atomicAdd(&accAtt_g[labelMin6 + 4], py);
atomicAdd(&accAtt_g[labelMin6 + 5], 1); //counter
}
}
#endif
__global__ void k_update(int nSpx, float* clusters, float* accAtt_g)
{
int cluster_idx = blockIdx.x*blockDim.x + threadIdx.x;
if (cluster_idx<nSpx)
{
uint cluster_idx6 = cluster_idx * 6;
uint cluster_idx5 = cluster_idx * 5;
int counter = accAtt_g[cluster_idx6 + 5];
if (counter != 0){
clusters[cluster_idx5] = accAtt_g[cluster_idx6] / counter;
clusters[cluster_idx5 + 1] = accAtt_g[cluster_idx6 + 1] / counter;
clusters[cluster_idx5 + 2] = accAtt_g[cluster_idx6 + 2] / counter;
clusters[cluster_idx5 + 3] = accAtt_g[cluster_idx6 + 3] / counter;
clusters[cluster_idx5 + 4] = accAtt_g[cluster_idx6 + 4] / counter;
//reset accumulator
accAtt_g[cluster_idx6] = 0;
accAtt_g[cluster_idx6 + 1] = 0;
accAtt_g[cluster_idx6 + 2] = 0;
accAtt_g[cluster_idx6 + 3] = 0;
accAtt_g[cluster_idx6 + 4] = 0;
accAtt_g[cluster_idx6 + 5] = 0;
}
}
}
//============== wrapper =================
__host__ void SLIC_cuda::InitBuffers() {
//allocate buffers on gpu
//gpuErrchk(cudaMalloc((void**)&frameBGRA_g, m_nPx*sizeof(uchar4))); //4 channels for padding
cudaChannelFormatDesc channelDescr = cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsigned);
gpuErrchk(cudaMallocArray(&frameBGRA_array, &channelDescr, m_width, m_height));
cudaChannelFormatDesc channelDescrLab = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindFloat);
gpuErrchk(cudaMallocArray(&frameLab_array, &channelDescrLab, m_width, m_height, cudaArraySurfaceLoadStore));
cudaChannelFormatDesc channelDescrLabels = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
gpuErrchk(cudaMallocArray(&labels_array, &channelDescrLabels, m_width, m_height, cudaArraySurfaceLoadStore));
//texture FrameBGR (read-only)
#if __CUDA_ARCH__>=300
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = frameBGRA_array;
// Specify texture object parameters
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.addressMode[1] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModePoint;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = false;
gpuErrchk(cudaCreateTextureObject(&frameBGRA_tex, &resDesc, &texDesc, NULL));
#else
frameBGRA_tex.addressMode[0] = cudaAddressModeClamp;
frameBGRA_tex.addressMode[1] = cudaAddressModeClamp;
frameBGRA_tex.filterMode = cudaFilterModePoint;
frameBGRA_tex.normalized = false;
cudaBindTextureToArray(&frameBGRA_tex, frameBGRA_array, &channelDescr);
#endif
// surface frameLab
#if __CUDA_ARCH__>=300
cudaResourceDesc resDescLab;
memset(&resDescLab, 0, sizeof(resDescLab));
resDescLab.resType = cudaResourceTypeArray;
resDescLab.res.array.array = frameLab_array;
gpuErrchk(cudaCreateSurfaceObject(&frameLab_surf, &resDescLab));
#else
cudaBindSurfaceToArray(&frameLab_surf, frameLab_array, &channelDescrLab);
#endif
// surface labels
#if __CUDA_ARCH__>=300
cudaResourceDesc resDescLabels;
memset(&resDescLabels, 0, sizeof(resDescLabels));
resDescLabels.resType = cudaResourceTypeArray;
resDescLabels.res.array.array = labels_array;
gpuErrchk(cudaCreateSurfaceObject(&labels_surf, &resDescLabels));
#else
cudaBindSurfaceToArray(&labels_surf, labels_array, &channelDescrLabels);
#endif
// buffers clusters , accAtt
gpuErrchk(cudaMalloc((void**)&clusters_g, m_nSpx*sizeof(float)* 5)); // 5-D centroid
gpuErrchk(cudaMalloc((void**)&accAtt_g, m_nSpx*sizeof(float)* 6)); // 5-D centroid acc + 1 counter
cudaMemset(accAtt_g, 0, m_nSpx*sizeof(float)* 6);//initialize accAtt to 0
}
#if __CUDA_ARCH__>=300
__host__ void SLIC_cuda::Rgb2CIELab(cudaTextureObject_t inputImg, cudaSurfaceObject_t outputImg, int width, int height)
{
int side = 16;
dim3 threadsPerBlock(side, side);
dim3 numBlocks(iDivUp(m_width, side), iDivUp(m_height, side));
kRgb2CIELab << <numBlocks, threadsPerBlock >> >(inputImg, outputImg, width, height);
}
#else
__host__ void SLIC_cuda::Rgb2CIELab(int width, int height)
{
int side = 16;
dim3 threadsPerBlock(side, side);
dim3 numBlocks(iDivUp(m_width, side), iDivUp(m_height, side));
kRgb2CIELab << <numBlocks, threadsPerBlock >> >(width, height);
}
#endif
__host__ void SLIC_cuda::InitClusters()
{
dim3 threadsPerBlock(NMAX_THREAD);
dim3 numBlocks(iDivUp(m_nSpx, NMAX_THREAD));
#if __CUDA_ARCH__>=300
k_initClusters << <numBlocks, threadsPerBlock >> >(frameLab_surf, clusters_g, m_width, m_height, m_width / m_wSpx, m_height / m_hSpx);
#else
k_initClusters << <numBlocks, threadsPerBlock >> >(clusters_g, m_width, m_height, m_width / m_wSpx, m_height / m_hSpx);
#endif
}
__host__ void SLIC_cuda::Assignement() {
int hMax = NMAX_THREAD / m_hSpx;
int nBlockPerClust = iDivUp(m_hSpx, hMax);
dim3 blockPerGrid(m_nSpx, nBlockPerClust);
dim3 threadPerBlock(m_wSpx, std::min(m_hSpx, hMax));
CV_Assert(threadPerBlock.x >= 3 && threadPerBlock.y >= 3);
float wc2 = m_wc * m_wc;
#if __CUDA_ARCH__>=300
k_assignement << < blockPerGrid, threadPerBlock >> >(m_width, m_height, m_wSpx, m_hSpx, frameLab_surf, labels_surf, clusters_g, accAtt_g, wc2);
#else
k_assignement << < blockPerGrid, threadPerBlock >> >(m_width, m_height, m_wSpx, m_hSpx, clusters_g, accAtt_g, wc2);
#endif
}
__host__ void SLIC_cuda::Update()
{
dim3 threadsPerBlock(NMAX_THREAD);
dim3 numBlocks(iDivUp(m_nSpx, NMAX_THREAD));
k_update << <numBlocks, threadsPerBlock >> >(m_nSpx, clusters_g, accAtt_g);
}
|
5c749c4eea7d0b23ba8eda9b27b4f37f820bd989.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <math.h>
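// matmul_tile: tiled matrix multiply C(n x m) = A(n x p) * B(p x m). Each block computes one
// TW x TW tile of C using dynamically allocated shared memory; out-of-range tile entries are
// zero-padded so arbitrary (non-multiple) sizes are handled.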
__global__ void matmul_tile(float *a, float *b, float *c, int n, int m, int p, int TW) {
extern __shared__ float bigarray[];
float *aTile=&bigarray[0], *bTile=&bigarray[TW*TW];
int tx = threadIdx.x;
int ty = threadIdx.y;
float cvalue = 0;
int col = tx + blockDim.x * blockIdx.x;
int row = ty + blockDim.y * blockIdx.y;
int tileNum, aIdx, bIdx;
tileNum = p/TW + (p % TW != 0);
for (int tileIdx=0; tileIdx<tileNum; tileIdx++) {
aIdx = tileIdx*TW + tx;
if(aIdx >= p || row >= n){
aTile[ty*TW+tx] = 0.;
}else{
aTile[ty*TW+tx] = a[row*p + aIdx]; //Copy to shared memory
}
bIdx = tileIdx*TW +ty;
if(bIdx >= p || col >= m){
bTile[ty*TW+tx] = 0.;
}else{
bTile[ty*TW+tx] = b[bIdx*m + col]; //Copy to shared memory
}
__syncthreads();
for (int k=0; k<TW; k++){
cvalue += aTile[ty*TW+k] * bTile[k*TW+tx];
//printf("bx = %d, by = %d, tx = %d, ty = %d: a=%.2f b=%.2f\n",blockIdx.x, blockIdx.y, tx, ty, aTile[ty*TW+k],bTile[k*TW+tx]);
}
__syncthreads();
}
if(row < n && col <m){
c[row*m + col] = cvalue;
}
}
void cpu_matrixmult(float *a,float *b, float *c, int n, int m, int p) {
int index, indexa, indexb;
float cvalue;
for(int col=0;col < m; col++)
for(int row=0;row < n; row++) {
indexb = col;
index = row * m + col;
cvalue = 0.;
for (indexa = row*p; indexa < (row*p + p); indexa++, indexb+=m)
cvalue += a[indexa]*b[indexb];
c[index] -= cvalue; //NOTE: This calculates the diff between CPU and GPU computations.
}
}
int main(int argc, char *argv[]) {
int i, j; // loop counters
int gpucount = 0; // Count of available GPUs
int Grid_Dim_x = 1; //Grid dimension, x
int Grid_Dim_y = 1; //Grid dimension, y
int Block_Dim_x = 1; //Block dimension, x
int Block_Dim_y = 1; //Block dimension, y
int TW = 1;
int n,m,p; // matrix dimension
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
int size_a, size_b, size_c; // number of bytes in arrays
hipEvent_t start, stop; // using cuda events to measure time
float elapsed_time_ms; // which is applicable for asynchronous code also
hipError_t errorcode;
// --------------------SET PARAMETERS AND DATA -----------------------
errorcode = hipGetDeviceCount(&gpucount);
if (errorcode == hipErrorNoDevice) {
printf("No GPUs are visible\n");
exit(-1);
}
//else printf("Device count = %d\n",gpucount);
if (argc<9) {
printf("# of inputs: %d\n", argc);
printf("Usage: Task1GPUsp <n> <m> <p> <block dim x> <block dim y> <grid dim x> <grid dim y> <tile width>\n");
exit (-1);
}
n = atoi(argv[1]);
m = atoi(argv[2]);
p = atoi(argv[3]);
Block_Dim_x = atoi(argv[4]); // non-Square block, # of rows
Block_Dim_y = atoi(argv[5]); // non-Square block, # of cols
if (Block_Dim_x * Block_Dim_y > 1024) {
printf("Error, too many threads in block\n");
exit (-1);
}
//not really used in Task2
Grid_Dim_x = atoi(argv[6]); // non-Square grid, # of rows
Grid_Dim_y = atoi(argv[7]); // non-Square grid, # of cols
TW = atoi(argv[8]);
if(Block_Dim_x != Block_Dim_y || Block_Dim_x != TW || Block_Dim_y != TW){
printf("Error, bx, by, tw must be equal\n");
exit(-1);
}
//printf("A Matrix Dimension = %dx%d\n",n,p);
//printf("B Matrix Dimension = %dx%d\n",p,m);
//printf("C Matrix Dimension = %dx%d\n",n,m);
Grid_Dim_x = m/Block_Dim_x + (m % Block_Dim_x != 0);
Grid_Dim_y = n/Block_Dim_y + (n % Block_Dim_y != 0);
//printf("Grid_x = %d Grid_y = %d\n", Grid_Dim_x,Grid_Dim_y);
dim3 Grid(Grid_Dim_x, Grid_Dim_y); //Grid structure
dim3 Block(Block_Dim_x, Block_Dim_y); //Block structure
size_a = n * p * sizeof(float); // number of bytes in total in arrays
size_b = p * m * sizeof(float); // number of bytes in total in arrays
size_c = n * m * sizeof(float); // number of bytes in total in arrays
a = (float*) malloc(size_a); // dynamically allocated memory for arrays on host
b = (float*) malloc(size_b);
c = (float*) malloc(size_c); // results from GPU
srand(12345);
//int p = n; //Used here only to illustrate proper initialization for non-square case
//printf ("a\n");
for(i=0;i < n;i++){
for(j=0;j < p;j++) {
a[i * p + j] = (float) rand() / (float) RAND_MAX;
//a[i * p + j] = (float) (i+j);
//printf("%.2f ", a[i * p + j]);
}
//printf("\n");
}
//printf("b\n");
for(i=0;i < p;i++){
for(j=0;j < m;j++) {
b[i * m + j] = (float) rand() / (float) RAND_MAX;
//b[i * m + j] = (float) (i+j);
//printf("%.2f ", b[i * m + j]);
}
//printf("\n");
}
// ------------- COMPUTATION DONE ON GPU ----------------------------
hipMalloc((void**)&dev_a, size_a); // allocate memory on device
hipMalloc((void**)&dev_b, size_b);
hipMalloc((void**)&dev_c, size_c);
hipMemcpy(dev_a, a , size_a ,hipMemcpyHostToDevice);
hipMemcpy(dev_b, b , size_b ,hipMemcpyHostToDevice);
hipEventCreate(&start); // instrument code to measure start time
hipEventCreate(&stop);
hipEventRecord(start, 0);
// hipEventSynchronize(start); // not needed
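  // dynamic shared memory: one TW x TW float tile for A plus one for B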
size_t Ns = 2 * TW*TW * sizeof(float);
hipLaunchKernelGGL(( matmul_tile), dim3(Grid),dim3(Block), Ns, 0, dev_a,dev_b,dev_c,n,m,p,TW);
hipEventRecord(stop, 0); // instrument code to measure end time
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time_ms, start, stop );
hipMemcpy(c,dev_c, size_c ,hipMemcpyDeviceToHost);
//printf("Time to calculate results on GPU: %f ms.\n", elapsed_time_ms); // exec. time
//printf("c\n");
for(i=0;i < n;i++){
for(j=0;j < m;j++) {
printf("%.2f ", c[i * m + j]);
}
printf("\n");
}
// ------------- COMPUTATION DONE ON HOST CPU ----------------------------
// DEBUGGING USE ONLY (AND FOR LIMITED NUMBERS OF TIMING RUNS)
/*
hipEventRecord(start, 0); // use same timing
// hipEventSynchronize(start); // not needed
cpu_matrixmult(a,b,c, n,m,p); // do calculation on host (NOTE: This computes the diff with GPU result.)
  hipEventRecord(stop, 0); // instrument code to measure end time
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time_ms, start, stop );
printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // exec. time
// ------------------- check device creates correct results -----------------
double error, suma, sumb, sumc, ai, bi, ci;
suma = 0.; sumb = 0; sumc = 0;
for(i=0;i < n*n;i++) {
ai = (double) a[i];
bi = (double) b[i];
ci = (double) c[i];
suma += ai*ai;
sumb += bi*bi;
sumc += ci*ci;
}
suma = sqrt(suma);
sumb = sqrt(sumb);
sumc = sqrt(sumc);
error = sumc/(n*suma*sumb);
printf("Scaled error between GPU and CPU: %e\n", error);
*/
// -------------- clean up ---------------------------------------
free(a);
free(b);
free(c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
| 5c749c4eea7d0b23ba8eda9b27b4f37f820bd989.cu | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <math.h>
__global__ void matmul_tile(float *a, float *b, float *c, int n, int m, int p, int TW) {
extern __shared__ float bigarray[];
float *aTile=&bigarray[0], *bTile=&bigarray[TW*TW];
int tx = threadIdx.x;
int ty = threadIdx.y;
float cvalue = 0;
int col = tx + blockDim.x * blockIdx.x;
int row = ty + blockDim.y * blockIdx.y;
int tileNum, aIdx, bIdx;
tileNum = p/TW + (p % TW != 0);
for (int tileIdx=0; tileIdx<tileNum; tileIdx++) {
aIdx = tileIdx*TW + tx;
if(aIdx >= p || row >= n){
aTile[ty*TW+tx] = 0.;
}else{
aTile[ty*TW+tx] = a[row*p + aIdx]; //Copy to shared memory
}
bIdx = tileIdx*TW +ty;
if(bIdx >= p || col >= m){
bTile[ty*TW+tx] = 0.;
}else{
bTile[ty*TW+tx] = b[bIdx*m + col]; //Copy to shared memory
}
__syncthreads();
for (int k=0; k<TW; k++){
cvalue += aTile[ty*TW+k] * bTile[k*TW+tx];
//printf("bx = %d, by = %d, tx = %d, ty = %d: a=%.2f b=%.2f\n",blockIdx.x, blockIdx.y, tx, ty, aTile[ty*TW+k],bTile[k*TW+tx]);
}
__syncthreads();
}
if(row < n && col <m){
c[row*m + col] = cvalue;
}
}
void cpu_matrixmult(float *a,float *b, float *c, int n, int m, int p) {
int index, indexa, indexb;
float cvalue;
for(int col=0;col < m; col++)
for(int row=0;row < n; row++) {
indexb = col;
index = row * m + col;
cvalue = 0.;
for (indexa = row*p; indexa < (row*p + p); indexa++, indexb+=m)
cvalue += a[indexa]*b[indexb];
c[index] -= cvalue; //NOTE: This calculates the diff between CPU and GPU computations.
}
}
int main(int argc, char *argv[]) {
int i, j; // loop counters
int gpucount = 0; // Count of available GPUs
int Grid_Dim_x = 1; //Grid dimension, x
int Grid_Dim_y = 1; //Grid dimension, y
int Block_Dim_x = 1; //Block dimension, x
int Block_Dim_y = 1; //Block dimension, y
int TW = 1;
int n,m,p; // matrix dimension
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
int size_a, size_b, size_c; // number of bytes in arrays
cudaEvent_t start, stop; // using cuda events to measure time
float elapsed_time_ms; // which is applicable for asynchronous code also
cudaError_t errorcode;
// --------------------SET PARAMETERS AND DATA -----------------------
errorcode = cudaGetDeviceCount(&gpucount);
if (errorcode == cudaErrorNoDevice) {
printf("No GPUs are visible\n");
exit(-1);
}
//else printf("Device count = %d\n",gpucount);
if (argc<9) {
printf("# of inputs: %d\n", argc);
printf("Usage: Task1GPUsp <n> <m> <p> <block dim x> <block dim y> <grid dim x> <grid dim y> <tile width>\n");
exit (-1);
}
n = atoi(argv[1]);
m = atoi(argv[2]);
p = atoi(argv[3]);
  Block_Dim_x = atoi(argv[4]); // non-Square block, # of cols
  Block_Dim_y = atoi(argv[5]); // non-Square block, # of rows
if (Block_Dim_x * Block_Dim_y > 1024) {
printf("Error, too many threads in block\n");
exit (-1);
}
//not really used in Task2
  Grid_Dim_x = atoi(argv[6]); // non-Square grid, # of cols
  Grid_Dim_y = atoi(argv[7]); // non-Square grid, # of rows
TW = atoi(argv[8]);
if(Block_Dim_x != Block_Dim_y || Block_Dim_x != TW || Block_Dim_y != TW){
printf("Error, bx, by, tw must be equal\n");
exit(-1);
}
//printf("A Matrix Dimension = %dx%d\n",n,p);
//printf("B Matrix Dimension = %dx%d\n",p,m);
//printf("C Matrix Dimension = %dx%d\n",n,m);
Grid_Dim_x = m/Block_Dim_x + (m % Block_Dim_x != 0);
Grid_Dim_y = n/Block_Dim_y + (n % Block_Dim_y != 0);
//printf("Grid_x = %d Grid_y = %d\n", Grid_Dim_x,Grid_Dim_y);
dim3 Grid(Grid_Dim_x, Grid_Dim_y); //Grid structure
dim3 Block(Block_Dim_x, Block_Dim_y); //Block structure
size_a = n * p * sizeof(float); // number of bytes in total in arrays
size_b = p * m * sizeof(float); // number of bytes in total in arrays
size_c = n * m * sizeof(float); // number of bytes in total in arrays
a = (float*) malloc(size_a); // dynamically allocated memory for arrays on host
b = (float*) malloc(size_b);
c = (float*) malloc(size_c); // results from GPU
srand(12345);
//int p = n; //Used here only to illustrate proper initialization for non-square case
//printf ("a\n");
for(i=0;i < n;i++){
for(j=0;j < p;j++) {
a[i * p + j] = (float) rand() / (float) RAND_MAX;
//a[i * p + j] = (float) (i+j);
//printf("%.2f ", a[i * p + j]);
}
//printf("\n");
}
//printf("b\n");
for(i=0;i < p;i++){
for(j=0;j < m;j++) {
b[i * m + j] = (float) rand() / (float) RAND_MAX;
//b[i * m + j] = (float) (i+j);
//printf("%.2f ", b[i * m + j]);
}
//printf("\n");
}
// ------------- COMPUTATION DONE ON GPU ----------------------------
cudaMalloc((void**)&dev_a, size_a); // allocate memory on device
cudaMalloc((void**)&dev_b, size_b);
cudaMalloc((void**)&dev_c, size_c);
cudaMemcpy(dev_a, a , size_a ,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b , size_b ,cudaMemcpyHostToDevice);
cudaEventCreate(&start); // instrument code to measure start time
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// cudaEventSynchronize(start); // not needed
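  // Dynamic shared-memory byte count for the launch: room for two TW x TW float
  // tiles (aTile and bTile), carved out of the kernel's extern __shared__ array.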
size_t Ns = 2 * TW*TW * sizeof(float);
matmul_tile<<<Grid,Block, Ns>>>(dev_a,dev_b,dev_c,n,m,p,TW);
cudaEventRecord(stop, 0); // instrument code to measure end time
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time_ms, start, stop );
cudaMemcpy(c,dev_c, size_c ,cudaMemcpyDeviceToHost);
//printf("Time to calculate results on GPU: %f ms.\n", elapsed_time_ms); // exec. time
//printf("c\n");
for(i=0;i < n;i++){
for(j=0;j < m;j++) {
printf("%.2f ", c[i * m + j]);
}
printf("\n");
}
// ------------- COMPUTATION DONE ON HOST CPU ----------------------------
// DEBUGGING USE ONLY (AND FOR LIMITED NUMBERS OF TIMING RUNS)
/*
cudaEventRecord(start, 0); // use same timing
// cudaEventSynchronize(start); // not needed
cpu_matrixmult(a,b,c, n,m,p); // do calculation on host (NOTE: This computes the diff with GPU result.)
  cudaEventRecord(stop, 0); // instrument code to measure end time
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time_ms, start, stop );
printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // exec. time
// ------------------- check device creates correct results -----------------
double error, suma, sumb, sumc, ai, bi, ci;
suma = 0.; sumb = 0; sumc = 0;
for(i=0;i < n*n;i++) {
ai = (double) a[i];
bi = (double) b[i];
ci = (double) c[i];
suma += ai*ai;
sumb += bi*bi;
sumc += ci*ci;
}
suma = sqrt(suma);
sumb = sqrt(sumb);
sumc = sqrt(sumc);
error = sumc/(n*suma*sumb);
printf("Scaled error between GPU and CPU: %e\n", error);
*/
// -------------- clean up ---------------------------------------
free(a);
free(b);
free(c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
f2d90ea32476864457b15466777fb1038fe68472.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/kernel_util.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
namespace oneflow {
namespace {
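// AssignGpu copies `value` into `ref` element-wise, gated by the scalar `condition`:
// with assign_if == true the copy happens when *condition != 0, with
// assign_if == false when *condition == 0; otherwise every thread returns early.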
template<bool assign_if, typename C, typename T>
__global__ void AssignGpu(int64_t elem_cnt, const C* condition, const T* value, T* ref) {
if (assign_if == (*condition == 0)) { return; }
CUDA_1D_KERNEL_LOOP(i, elem_cnt) { ref[i] = value[i]; }
}
template<bool assign_if, typename C, typename T>
class AssignIfGPUKernel final : public user_op::OpKernel {
public:
AssignIfGPUKernel() = default;
~AssignIfGPUKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* condition = ctx->Tensor4ArgNameAndIndex("condition", 0);
CHECK_EQ(condition->shape_view().NumAxes(), 1);
CHECK_EQ(condition->shape_view().At(0), 1);
const user_op::Tensor* value = ctx->Tensor4ArgNameAndIndex("value", 0);
user_op::Tensor* ref = ctx->Tensor4ArgNameAndIndex("ref", 0);
if (value->dptr() == ref->dptr()) { return; }
CHECK_EQ(value->shape_view(), ref->shape_view());
CHECK_EQ(value->data_type(), ref->data_type());
const size_t elem_cnt = ref->shape_view().elem_cnt();
hipLaunchKernelGGL(( AssignGpu<assign_if, C, T>), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->stream()->As<ep::CudaStream>()->cuda_stream(),
elem_cnt, condition->dptr<C>(), value->dptr<T>(), ref->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return true; }
};
} // namespace
#define REGISTER_ASSIGN_WITH_CONDITION_VALUE_CUDA_KERNEL(op_type_name, assign_if, condition_type, \
value_type) \
REGISTER_USER_KERNEL(op_type_name) \
.SetCreateFn<AssignIfGPUKernel<assign_if, condition_type, value_type>>() \
.SetIsMatchedHob( \
(user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("condition", 0) == GetDataType<condition_type>::value) \
&& (user_op::HobDataType("value", 0) == GetDataType<value_type>::value));
#define REGISTER_ASSIGN_IF_CUDA_KERNEL(condition_type, value_type) \
REGISTER_ASSIGN_WITH_CONDITION_VALUE_CUDA_KERNEL( \
"assign_if", true, OF_PP_PAIR_FIRST(condition_type), OF_PP_PAIR_FIRST(value_type)); \
REGISTER_ASSIGN_WITH_CONDITION_VALUE_CUDA_KERNEL( \
"assign_if_not", false, OF_PP_PAIR_FIRST(condition_type), OF_PP_PAIR_FIRST(value_type))
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_ASSIGN_IF_CUDA_KERNEL, INT_DATA_TYPE_SEQ,
POD_DATA_TYPE_SEQ)
} // namespace oneflow
| f2d90ea32476864457b15466777fb1038fe68472.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/kernel_util.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
namespace oneflow {
namespace {
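// AssignGpu copies `value` into `ref` element-wise, gated by the scalar `condition`:
// with assign_if == true the copy happens when *condition != 0, with
// assign_if == false when *condition == 0; otherwise every thread returns early.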
template<bool assign_if, typename C, typename T>
__global__ void AssignGpu(int64_t elem_cnt, const C* condition, const T* value, T* ref) {
if (assign_if == (*condition == 0)) { return; }
CUDA_1D_KERNEL_LOOP(i, elem_cnt) { ref[i] = value[i]; }
}
template<bool assign_if, typename C, typename T>
class AssignIfGPUKernel final : public user_op::OpKernel {
public:
AssignIfGPUKernel() = default;
~AssignIfGPUKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* condition = ctx->Tensor4ArgNameAndIndex("condition", 0);
CHECK_EQ(condition->shape_view().NumAxes(), 1);
CHECK_EQ(condition->shape_view().At(0), 1);
const user_op::Tensor* value = ctx->Tensor4ArgNameAndIndex("value", 0);
user_op::Tensor* ref = ctx->Tensor4ArgNameAndIndex("ref", 0);
if (value->dptr() == ref->dptr()) { return; }
CHECK_EQ(value->shape_view(), ref->shape_view());
CHECK_EQ(value->data_type(), ref->data_type());
const size_t elem_cnt = ref->shape_view().elem_cnt();
AssignGpu<assign_if, C, T><<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0,
ctx->stream()->As<ep::CudaStream>()->cuda_stream()>>>(
elem_cnt, condition->dptr<C>(), value->dptr<T>(), ref->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return true; }
};
} // namespace
#define REGISTER_ASSIGN_WITH_CONDITION_VALUE_CUDA_KERNEL(op_type_name, assign_if, condition_type, \
value_type) \
REGISTER_USER_KERNEL(op_type_name) \
.SetCreateFn<AssignIfGPUKernel<assign_if, condition_type, value_type>>() \
.SetIsMatchedHob( \
(user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("condition", 0) == GetDataType<condition_type>::value) \
&& (user_op::HobDataType("value", 0) == GetDataType<value_type>::value));
#define REGISTER_ASSIGN_IF_CUDA_KERNEL(condition_type, value_type) \
REGISTER_ASSIGN_WITH_CONDITION_VALUE_CUDA_KERNEL( \
"assign_if", true, OF_PP_PAIR_FIRST(condition_type), OF_PP_PAIR_FIRST(value_type)); \
REGISTER_ASSIGN_WITH_CONDITION_VALUE_CUDA_KERNEL( \
"assign_if_not", false, OF_PP_PAIR_FIRST(condition_type), OF_PP_PAIR_FIRST(value_type))
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_ASSIGN_IF_CUDA_KERNEL, INT_DATA_TYPE_SEQ,
POD_DATA_TYPE_SEQ)
} // namespace oneflow
|
6fdf8d4d42c96cb136912a422ce7071f909de69d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
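// One thread per element: blockIdx.x walks the m rows of `a` and threadIdx.x the
// n columns, so element (i, j) of `a` lands at position (j, i) of the n x m result `t`.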
__global__ void transpose(int *a, int *t) {
int n = threadIdx.x, m = blockIdx.x, size = blockDim.x, size1 = gridDim.x;
t[n*size1 + m] = a[m*size+n];
}
int main (void) {
int *a, *t, m, n;
int *d_a, *d_t;
printf("Enter the value of m: \n");
scanf("%d", &m);
printf("Enter the value of n: \n");
scanf("%d", &n);
int size = sizeof(int)*m*n;
a = (int*)malloc(m*n*sizeof(int));
t = (int*)malloc(m*n*sizeof(int));
printf("Enter the matrix: \n");
for (int i = 0; i < m*n; ++i)
{
scanf("%d", &a[i]);
}
hipMalloc((void**)&d_a, size);
hipMalloc((void**)&d_t, size);
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( transpose), dim3(m),dim3(n), 0, 0, d_a, d_t);
hipMemcpy(t, d_t, size, hipMemcpyDeviceToHost);
printf("Result vectors is: \n");
for (int i = 0; i < n; ++i)
{
for (int j = 0; j < m; ++j)
{
printf("%d\t", t[i*m+j]);
}
printf("\n");
}
getchar();
hipFree(d_a);
hipFree(d_t);
return 0;
}
| 6fdf8d4d42c96cb136912a422ce7071f909de69d.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
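// One thread per element: blockIdx.x walks the m rows of `a` and threadIdx.x the
// n columns, so element (i, j) of `a` lands at position (j, i) of the n x m result `t`.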
__global__ void transpose(int *a, int *t) {
int n = threadIdx.x, m = blockIdx.x, size = blockDim.x, size1 = gridDim.x;
t[n*size1 + m] = a[m*size+n];
}
int main (void) {
int *a, *t, m, n;
int *d_a, *d_t;
printf("Enter the value of m: \n");
scanf("%d", &m);
printf("Enter the value of n: \n");
scanf("%d", &n);
int size = sizeof(int)*m*n;
a = (int*)malloc(m*n*sizeof(int));
t = (int*)malloc(m*n*sizeof(int));
printf("Enter the matrix: \n");
for (int i = 0; i < m*n; ++i)
{
scanf("%d", &a[i]);
}
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_t, size);
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
transpose<<<m,n>>>(d_a, d_t);
cudaMemcpy(t, d_t, size, cudaMemcpyDeviceToHost);
printf("Result vectors is: \n");
for (int i = 0; i < n; ++i)
{
for (int j = 0; j < m; ++j)
{
printf("%d\t", t[i*m+j]);
}
printf("\n");
}
getchar();
cudaFree(d_a);
cudaFree(d_t);
return 0;
}
|
f2739e91a9a6cf56cbcf453ae2089a465aff985d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
// To observe branch divergence here, the kernel needs more conditional branches than
// the compiler's branch-predication threshold (roughly 4 or 7, depending on complexity).
__global__ void mathkernel3(float * c)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float a, b;
a = b = 0.0f;
if (tid % 2 == 0)
{
a = 100.0f;
}
else if (tid % 3 == 0)
{
a = 200.0f;
}
else if (tid % 5 == 0)
{
a = 100;
}
else if (tid % 7 == 0)
{
a = 400.0f;
}
else if (tid % 9 == 0)
{
a = 500.0f;
}
else if (tid % 11 == 0)
{
a = 400;
}
else if (tid % 17 == 0)
{
a = 700.0f;
}
else if (tid % 19 == 0)
{
a = 300;
}
else if (tid % 23 == 0)
{
a = 200;
}
else if (tid % 29 == 0)
{
a = 1000.0f;
}
c[tid] = a + b;
}
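// mathkernel2 evaluates the condition once into a bool and uses two non-nested ifs;
// presumably this is meant to let the compiler turn the branches into predicated
// instructions, in contrast to the if/else of mathkernel1.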
__global__ void mathkernel2(float * c)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float a, b;
a = b = 0.0f;
bool ipred = (tid % 2 == 0);
if (ipred)
{
a = 100.0f;
}
if(!ipred)
{
b = 200.0f;
}
c[tid] = a + b;
}
__global__ void mathkernel1(float * c)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float a, b;
a = b = 0.0f;
if (tid%2 ==0)
{
a = 100.0f;
}
else
{
b = 200.0f;
}
c[tid] = a + b;
}
void run_code_divergence()
{
int array_size = 1024;
int byte_array_size = array_size * sizeof(float);
float * h_c, *h_ref;
h_c = (float*)malloc(byte_array_size);
h_ref = (float*)malloc(byte_array_size);
for (int i = 0; i < array_size; i++)
{
h_c[i] = 1;
}
float* d_c;
hipMalloc((float**)&d_c,byte_array_size);
hipMemcpy(d_c, h_c, byte_array_size, hipMemcpyHostToDevice);
dim3 grid(1);
dim3 block(array_size / grid.x);
mathkernel1 << <grid, block >> > (d_c);
hipDeviceSynchronize();
mathkernel2 << <grid, block >> > (d_c);
hipDeviceSynchronize();
//hipMemcpy(h_ref, d_c, byte_array_size, hipMemcpyDeviceToHost);
mathkernel3 << <grid, block >> > (d_c);
hipDeviceSynchronize();
hipFree(d_c);
free(h_ref);
free(h_c);
}
//int main()
//{
// run_code_divergence();
// system("pause");
// return 0;
//} | f2739e91a9a6cf56cbcf453ae2089a465aff985d.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
// To observe branch divergence here, the kernel needs more conditional branches than
// the compiler's branch-predication threshold (roughly 4 or 7, depending on complexity).
__global__ void mathkernel3(float * c)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float a, b;
a = b = 0.0f;
if (tid % 2 == 0)
{
a = 100.0f;
}
else if (tid % 3 == 0)
{
a = 200.0f;
}
else if (tid % 5 == 0)
{
a = 100;
}
else if (tid % 7 == 0)
{
a = 400.0f;
}
else if (tid % 9 == 0)
{
a = 500.0f;
}
else if (tid % 11 == 0)
{
a = 400;
}
else if (tid % 17 == 0)
{
a = 700.0f;
}
else if (tid % 19 == 0)
{
a = 300;
}
else if (tid % 23 == 0)
{
a = 200;
}
else if (tid % 29 == 0)
{
a = 1000.0f;
}
c[tid] = a + b;
}
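// mathkernel2 evaluates the condition once into a bool and uses two non-nested ifs;
// presumably this is meant to let the compiler turn the branches into predicated
// instructions, in contrast to the if/else of mathkernel1.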
__global__ void mathkernel2(float * c)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float a, b;
a = b = 0.0f;
bool ipred = (tid % 2 == 0);
if (ipred)
{
a = 100.0f;
}
if(!ipred)
{
b = 200.0f;
}
c[tid] = a + b;
}
__global__ void mathkernel1(float * c)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float a, b;
a = b = 0.0f;
if (tid%2 ==0)
{
a = 100.0f;
}
else
{
b = 200.0f;
}
c[tid] = a + b;
}
void run_code_divergence()
{
int array_size = 1024;
int byte_array_size = array_size * sizeof(float);
float * h_c, *h_ref;
h_c = (float*)malloc(byte_array_size);
h_ref = (float*)malloc(byte_array_size);
for (int i = 0; i < array_size; i++)
{
h_c[i] = 1;
}
float* d_c;
cudaMalloc((float**)&d_c,byte_array_size);
cudaMemcpy(d_c, h_c, byte_array_size, cudaMemcpyHostToDevice);
dim3 grid(1);
dim3 block(array_size / grid.x);
mathkernel1 << <grid, block >> > (d_c);
cudaDeviceSynchronize();
mathkernel2 << <grid, block >> > (d_c);
cudaDeviceSynchronize();
//cudaMemcpy(h_ref, d_c, byte_array_size, cudaMemcpyDeviceToHost);
mathkernel3 << <grid, block >> > (d_c);
cudaDeviceSynchronize();
cudaFree(d_c);
free(h_ref);
free(h_c);
}
//int main()
//{
// run_code_divergence();
// system("pause");
// return 0;
//} |
6b201deddda47f2c5f245da232715e80843453cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// #include <iostream>
#include <cstdio>
#include <cutil.h>
#include "cuda_pointer.h"
#define NTHREAD 128
#define PROFILE
#ifdef PROFILE
#include <sys/time.h>
static double get_wtime(){
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec + 1.e-6 * tv.tv_usec;
}
#else
static double get_wtime(){
return 0.0;
}
#endif
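// The float2 helpers emulate extended precision with float pairs:
// float2_split stores a double as high/low floats (scaled by 2^20),
// float2_accum adds a value with a compensation term kept in .y, and
// float2_regularize folds that compensation back into .x.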
static float2 float2_split(double x){
const int shift = 20;
float2 ret;
x *= (1<<shift);
double xi = (int)x;
double xf = x - xi;
ret.x = xi * (1./(1<<shift));
ret.y = xf * (1./(1<<shift));
return ret;
}
static __device__ float2 float2_accum(float2 acc, float x){
float tmp = acc.x + x;
acc.y -= (tmp - acc.x) - x;
acc.x = tmp;
return acc;
}
static __device__ float2 float2_regularize(float2 acc){
float tmp = acc.x + acc.y;
acc.y = acc.y -(tmp - acc.x);
acc.x = tmp;
return acc;
}
struct Particle{
float2 pos[3];
float mass;
float pad;
Particle(double x[3], double m){
pos[0] = float2_split(x[0]);
pos[1] = float2_split(x[1]);
pos[2] = float2_split(x[2]);
mass = (float)m;
}
Particle(int){
pos[0].x = pos[0].y = pos[1].x = pos[1].y = pos[2].x = pos[2].y = mass = pad = 0.f;
}
__device__ Particle() {}
};
__global__ void pot_kernel(int n, Particle *ptcl, float2 *phi){
__shared__ Particle jpbuf[NTHREAD];
int i = NTHREAD * blockIdx.x + threadIdx.x;
Particle ip = ptcl[i];
float2 phii = make_float2(0.f, 0.f);
for(int j=0; j<n; j+= NTHREAD){
__syncthreads();
jpbuf[threadIdx.x] = ptcl[j + threadIdx.x];
__syncthreads();
#pragma unroll 4
for(int jj=0; jj<NTHREAD; jj++){
// if(j+jj == i) continue;
Particle &jp = jpbuf[jj];
float dx = (jp.pos[0].x - ip.pos[0].x) + (jp.pos[0].y - ip.pos[0].y);
float dy = (jp.pos[1].x - ip.pos[1].x) + (jp.pos[1].y - ip.pos[1].y);
float dz = (jp.pos[2].x - ip.pos[2].x) + (jp.pos[2].y - ip.pos[2].y);
float r2 = dx*dx + dy*dy + dz*dz;
// if(r2==0.f) continue;
float pij = jp.mass * rsqrtf(r2);
// phii = float2_accum(phii, pij);
if(r2 > 0.f) phii = float2_accum(phii, pij);
}
phii = float2_regularize(phii);
}
phi[i] = phii;
}
void gpupot(
int n,
double m[],
double x[][3],
double pot[]){
double t0 = get_wtime();
#if 0
float2 *phi_d, *phi_h;
Particle *ptcl_d, *ptcl_h;
#else
cudaPointer <float2> phi;
cudaPointer <Particle> ptcl;
#endif
int ng = NTHREAD * (n/NTHREAD + (n%NTHREAD ? 1 : 0));
#if 0
hipMalloc ((void **)&phi_d, ng * sizeof(float2));
hipHostMalloc((void **)&phi_h, ng * sizeof(float2));
hipMalloc ((void **)&ptcl_d, ng * sizeof(Particle));
hipHostMalloc((void **)&ptcl_h, ng * sizeof(Particle));
#else
phi.allocate(ng);
ptcl.allocate(ng);
#endif
// std::cout << n << " " << ng << std::endl;
for(int i=0; i<n; i++){
// ptcl_h[i] = Particle(x[i], m[i]);
ptcl[i] = Particle(x[i], m[i]);
}
for(int i=n; i<ng; i++){
// ptcl_h[i] = Particle(0);
ptcl[i] = Particle(0);
}
// hipMemcpy(ptcl_d, ptcl_h, ng * sizeof(Particle), hipMemcpyHostToDevice);
ptcl.htod(ng);
dim3 grid(ng/NTHREAD, 1, 1);
dim3 threads(NTHREAD, 1, 1);
int sharedMemSize = NTHREAD * sizeof(Particle);
// pot_kernel <<<grid, threads, sharedMemSize >>> (n, ptcl_d, phi_d);
hipLaunchKernelGGL(( pot_kernel) , dim3(grid), dim3(threads), sharedMemSize , 0, n, ptcl, phi);
// hipMemcpy(phi_h, phi_d, n * sizeof(float2), hipMemcpyDeviceToHost);
phi.dtoh(n);
for(int i=0; i<n; i++){
// pot[i] = (double)phi_h[i].x + (double)phi_h[i].y;
pot[i] = (double)phi[i].x + (double)phi[i].y;
}
#if 0
hipFree (phi_d);
hipHostFree(phi_h);
hipFree (ptcl_d);
hipHostFree(ptcl_h);
#else
phi.free();
ptcl.free();
#endif
double t1 = get_wtime();
#ifdef PROFILE
fprintf(stderr, "gpupot: %f sec\n", t1 - t0);
#endif
}
extern "C"{
void gpupot_(
int *n,
double m[],
double x[][3],
double pot[]){
gpupot(*n, m, x, pot);
}
}
| 6b201deddda47f2c5f245da232715e80843453cf.cu | // #include <iostream>
#include <cstdio>
#include <cutil.h>
#include "cuda_pointer.h"
#define NTHREAD 128
#define PROFILE
#ifdef PROFILE
#include <sys/time.h>
static double get_wtime(){
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec + 1.e-6 * tv.tv_usec;
}
#else
static double get_wtime(){
return 0.0;
}
#endif
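// The float2 helpers emulate extended precision with float pairs:
// float2_split stores a double as high/low floats (scaled by 2^20),
// float2_accum adds a value with a compensation term kept in .y, and
// float2_regularize folds that compensation back into .x.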
static float2 float2_split(double x){
const int shift = 20;
float2 ret;
x *= (1<<shift);
double xi = (int)x;
double xf = x - xi;
ret.x = xi * (1./(1<<shift));
ret.y = xf * (1./(1<<shift));
return ret;
}
static __device__ float2 float2_accum(float2 acc, float x){
float tmp = acc.x + x;
acc.y -= (tmp - acc.x) - x;
acc.x = tmp;
return acc;
}
static __device__ float2 float2_regularize(float2 acc){
float tmp = acc.x + acc.y;
acc.y = acc.y -(tmp - acc.x);
acc.x = tmp;
return acc;
}
struct Particle{
float2 pos[3];
float mass;
float pad;
Particle(double x[3], double m){
pos[0] = float2_split(x[0]);
pos[1] = float2_split(x[1]);
pos[2] = float2_split(x[2]);
mass = (float)m;
}
Particle(int){
pos[0].x = pos[0].y = pos[1].x = pos[1].y = pos[2].x = pos[2].y = mass = pad = 0.f;
}
__device__ Particle() {}
};
__global__ void pot_kernel(int n, Particle *ptcl, float2 *phi){
__shared__ Particle jpbuf[NTHREAD];
int i = NTHREAD * blockIdx.x + threadIdx.x;
Particle ip = ptcl[i];
float2 phii = make_float2(0.f, 0.f);
for(int j=0; j<n; j+= NTHREAD){
__syncthreads();
jpbuf[threadIdx.x] = ptcl[j + threadIdx.x];
__syncthreads();
#pragma unroll 4
for(int jj=0; jj<NTHREAD; jj++){
// if(j+jj == i) continue;
Particle &jp = jpbuf[jj];
float dx = (jp.pos[0].x - ip.pos[0].x) + (jp.pos[0].y - ip.pos[0].y);
float dy = (jp.pos[1].x - ip.pos[1].x) + (jp.pos[1].y - ip.pos[1].y);
float dz = (jp.pos[2].x - ip.pos[2].x) + (jp.pos[2].y - ip.pos[2].y);
float r2 = dx*dx + dy*dy + dz*dz;
// if(r2==0.f) continue;
float pij = jp.mass * rsqrtf(r2);
// phii = float2_accum(phii, pij);
if(r2 > 0.f) phii = float2_accum(phii, pij);
}
phii = float2_regularize(phii);
}
phi[i] = phii;
}
void gpupot(
int n,
double m[],
double x[][3],
double pot[]){
double t0 = get_wtime();
#if 0
float2 *phi_d, *phi_h;
Particle *ptcl_d, *ptcl_h;
#else
cudaPointer <float2> phi;
cudaPointer <Particle> ptcl;
#endif
int ng = NTHREAD * (n/NTHREAD + (n%NTHREAD ? 1 : 0));
#if 0
cudaMalloc ((void **)&phi_d, ng * sizeof(float2));
cudaMallocHost((void **)&phi_h, ng * sizeof(float2));
cudaMalloc ((void **)&ptcl_d, ng * sizeof(Particle));
cudaMallocHost((void **)&ptcl_h, ng * sizeof(Particle));
#else
phi.allocate(ng);
ptcl.allocate(ng);
#endif
// std::cout << n << " " << ng << std::endl;
for(int i=0; i<n; i++){
// ptcl_h[i] = Particle(x[i], m[i]);
ptcl[i] = Particle(x[i], m[i]);
}
for(int i=n; i<ng; i++){
// ptcl_h[i] = Particle(0);
ptcl[i] = Particle(0);
}
// cudaMemcpy(ptcl_d, ptcl_h, ng * sizeof(Particle), cudaMemcpyHostToDevice);
ptcl.htod(ng);
dim3 grid(ng/NTHREAD, 1, 1);
dim3 threads(NTHREAD, 1, 1);
int sharedMemSize = NTHREAD * sizeof(Particle);
// pot_kernel <<<grid, threads, sharedMemSize >>> (n, ptcl_d, phi_d);
pot_kernel <<<grid, threads, sharedMemSize >>> (n, ptcl, phi);
// cudaMemcpy(phi_h, phi_d, n * sizeof(float2), cudaMemcpyDeviceToHost);
phi.dtoh(n);
for(int i=0; i<n; i++){
// pot[i] = (double)phi_h[i].x + (double)phi_h[i].y;
pot[i] = (double)phi[i].x + (double)phi[i].y;
}
#if 0
cudaFree (phi_d);
cudaFreeHost(phi_h);
cudaFree (ptcl_d);
cudaFreeHost(ptcl_h);
#else
phi.free();
ptcl.free();
#endif
double t1 = get_wtime();
#ifdef PROFILE
fprintf(stderr, "gpupot: %f sec\n", t1 - t0);
#endif
}
extern "C"{
void gpupot_(
int *n,
double m[],
double x[][3],
double pot[]){
gpupot(*n, m, x, pot);
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.